forked from github-starred/komodo
Compare commits
36 Commits
v2.0.0-dev
...
v1.17.0-de
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
51b16434a5 | ||
|
|
f49d73dba3 | ||
|
|
62723fa32f | ||
|
|
d1d17626b2 | ||
|
|
312f51e50b | ||
|
|
d68ea5cf45 | ||
|
|
71fc0bba97 | ||
|
|
1a7a0299de | ||
|
|
cd59da100f | ||
|
|
5a8ed8b81d | ||
|
|
4c9479a8bc | ||
|
|
19aa5fb260 | ||
|
|
75c0c967ac | ||
|
|
8832a05ffe | ||
|
|
7598978cd1 | ||
|
|
b868ad5794 | ||
|
|
be6a0db511 | ||
|
|
c360289984 | ||
|
|
23abdb85b0 | ||
|
|
b510d6dc41 | ||
|
|
f29c60a6e9 | ||
|
|
ee33fc98d9 | ||
|
|
729ffc9c3c | ||
|
|
395930094d | ||
|
|
79a16c6f22 | ||
|
|
4e4aa8c567 | ||
|
|
bfbcd33c1a | ||
|
|
01e2e0be11 | ||
|
|
2e6e409b85 | ||
|
|
30e87b6aff | ||
|
|
8423a53106 | ||
|
|
9e6c122313 | ||
|
|
d03d0b3b78 | ||
|
|
1bf76f1b57 | ||
|
|
7815639aeb | ||
|
|
af9fbf9667 |
@@ -23,7 +23,7 @@ services:
|
||||
|
||||
db:
|
||||
extends:
|
||||
file: ../dev.compose.yaml
|
||||
file: ../test.compose.yaml
|
||||
service: ferretdb
|
||||
|
||||
volumes:
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
// Features to add to the dev container. More info: https://containers.dev/features.
|
||||
"features": {
|
||||
"ghcr.io/devcontainers/features/node:1": {
|
||||
"version": "20.12.2"
|
||||
"version": "18.18.0"
|
||||
},
|
||||
"ghcr.io/devcontainers-community/features/deno:1": {
|
||||
|
||||
|
||||
2
.github/FUNDING.yml
vendored
2
.github/FUNDING.yml
vendored
@@ -1,2 +0,0 @@
|
||||
# https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/displaying-a-sponsor-button-in-your-repository
|
||||
open_collective: komodo
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,7 +1,6 @@
|
||||
target
|
||||
node_modules
|
||||
dist
|
||||
deno.lock
|
||||
.env
|
||||
.env.development
|
||||
.DS_Store
|
||||
@@ -10,4 +9,5 @@ deno.lock
|
||||
/frontend/build
|
||||
/lib/ts_client/build
|
||||
|
||||
creds.toml
|
||||
.dev
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
.dev
|
||||
10
.vscode/resolver.code-snippets
vendored
10
.vscode/resolver.code-snippets
vendored
@@ -3,8 +3,8 @@
|
||||
"scope": "rust",
|
||||
"prefix": "resolve",
|
||||
"body": [
|
||||
"impl Resolve<${0}> for ${1} {",
|
||||
"\tasync fn resolve(self, _: &${0}) -> Result<Self::Response, Self::Error> {",
|
||||
"impl Resolve<${1}, User> for State {",
|
||||
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
|
||||
"\t\ttodo!()",
|
||||
"\t}",
|
||||
"}"
|
||||
@@ -15,9 +15,9 @@
|
||||
"prefix": "static",
|
||||
"body": [
|
||||
"fn ${1}() -> &'static ${2} {",
|
||||
"\tstatic ${0}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${0}.get_or_init(|| {",
|
||||
"\t\ttodo!()",
|
||||
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${3}.get_or_init(|| {",
|
||||
"\t\t${0}",
|
||||
"\t})",
|
||||
"}"
|
||||
]
|
||||
|
||||
3784
Cargo.lock
generated
3784
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
133
Cargo.toml
133
Cargo.toml
@@ -8,142 +8,109 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "2.0.0-dev-90"
|
||||
edition = "2024"
|
||||
version = "1.17.0-dev-1"
|
||||
edition = "2021"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/moghtech/komodo"
|
||||
homepage = "https://komo.do"
|
||||
|
||||
[profile.release]
|
||||
strip = "debuginfo"
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
komodo_client = { path = "client/core/rs" }
|
||||
periphery_client = { path = "client/periphery/rs" }
|
||||
environment_file = { path = "lib/environment_file" }
|
||||
environment = { path = "lib/environment" }
|
||||
interpolate = { path = "lib/interpolate" }
|
||||
secret_file = { path = "lib/secret_file" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
transport = { path = "lib/transport" }
|
||||
database = { path = "lib/database" }
|
||||
encoding = { path = "lib/encoding" }
|
||||
response = { path = "lib/response" }
|
||||
command = { path = "lib/command" }
|
||||
config = { path = "lib/config" }
|
||||
logger = { path = "lib/logger" }
|
||||
cache = { path = "lib/cache" }
|
||||
noise = { path = "lib/noise" }
|
||||
git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
serror = { version = "0.5.3", default-features = false }
|
||||
slack = { version = "2.0.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.5.0", default-features = false }
|
||||
slack = { version = "0.3.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
async_timing_util = "1.1.0"
|
||||
merge_config_files = "0.1.5"
|
||||
async_timing_util = "1.0.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "2.0.2"
|
||||
mongo_indexed = "2.0.1"
|
||||
resolver_api = "3.0.0"
|
||||
toml_pretty = "2.0.0"
|
||||
mungos = "3.2.2"
|
||||
svi = "1.2.0"
|
||||
toml_pretty = "1.1.2"
|
||||
mungos = "1.1.0"
|
||||
svi = "1.0.1"
|
||||
|
||||
# ASYNC
|
||||
reqwest = { version = "0.12.24", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.48.0", features = ["full"] }
|
||||
tokio-util = { version = "0.7.17", features = ["io", "codec"] }
|
||||
tokio-stream = { version = "0.1.17", features = ["sync"] }
|
||||
pin-project-lite = "0.2.16"
|
||||
reqwest = { version = "0.12.12", default-features = false, features = ["json", "rustls-tls"] }
|
||||
tokio = { version = "1.43.0", features = ["full"] }
|
||||
tokio-util = "0.7.13"
|
||||
futures = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
arc-swap = "1.7.1"
|
||||
|
||||
# SERVER
|
||||
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.12.1", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
|
||||
axum-extra = { version = "0.10.0", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.2", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.1", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.1", features = ["ws", "json", "macros"] }
|
||||
tokio-tungstenite = "0.26.1"
|
||||
|
||||
# SER/DE
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
indexmap = { version = "2.12.0", features = ["serde"] }
|
||||
serde = { version = "1.0.227", features = ["derive"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
bson = { version = "2.15.0" } # must keep in sync with mongodb version
|
||||
serde_yaml_ng = "0.10.0"
|
||||
serde_json = "1.0.145"
|
||||
serde_qs = "0.15.0"
|
||||
toml = "0.9.8"
|
||||
url = "2.5.7"
|
||||
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
|
||||
serde = { version = "1.0.217", features = ["derive"] }
|
||||
strum = { version = "0.26.3", features = ["derive"] }
|
||||
serde_json = "1.0.135"
|
||||
serde_yaml = "0.9.34"
|
||||
toml = "0.8.19"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.100"
|
||||
thiserror = "2.0.17"
|
||||
anyhow = "1.0.95"
|
||||
thiserror = "2.0.11"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.20", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.31.0"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
opentelemetry = "0.31.0"
|
||||
opentelemetry-otlp = { version = "0.27.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.27.1", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.19", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.27.0"
|
||||
tracing-opentelemetry = "0.28.0"
|
||||
opentelemetry = "0.27.1"
|
||||
tracing = "0.1.41"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.51", features = ["derive"] }
|
||||
clap = { version = "4.5.26", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO / AUTH
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] } # locked back with octorust
|
||||
rustls = { version = "0.23.35", features = ["aws-lc-rs"] }
|
||||
pem-rfc7468 = { version = "1.0.0", features = ["alloc"] }
|
||||
openidconnect = "4.0.1"
|
||||
uuid = { version = "1.12.0", features = ["v4", "fast-rng", "serde"] }
|
||||
openidconnect = "3.5.0"
|
||||
urlencoding = "2.1.3"
|
||||
bcrypt = "0.17.1"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.16.0"
|
||||
base64 = "0.22.1"
|
||||
pkcs8 = "0.10.2"
|
||||
snow = "0.10.0"
|
||||
rustls = "0.23.21"
|
||||
hmac = "0.12.1"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
rand = "0.9.2"
|
||||
sha2 = "0.10.8"
|
||||
rand = "0.8.5"
|
||||
jwt = "0.16.0"
|
||||
hex = "0.4.3"
|
||||
spki = "0.7.3"
|
||||
der = "0.7.10"
|
||||
|
||||
# SYSTEM
|
||||
hickory-resolver = "0.25.2"
|
||||
portable-pty = "0.9.0"
|
||||
shell-escape = "0.1.5"
|
||||
crossterm = "0.29.0"
|
||||
bollard = "0.19.4"
|
||||
sysinfo = "0.37.1"
|
||||
shlex = "1.3.0"
|
||||
bollard = "0.18.1"
|
||||
sysinfo = "0.33.1"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.8.10"
|
||||
aws-sdk-ec2 = "1.184.0"
|
||||
aws-credential-types = "1.2.9"
|
||||
|
||||
## CRON
|
||||
english-to-cron = "0.1.6"
|
||||
chrono-tz = "0.10.4"
|
||||
chrono = "0.4.42"
|
||||
croner = "3.0.1"
|
||||
aws-config = "1.5.13"
|
||||
aws-sdk-ec2 = "1.101.0"
|
||||
|
||||
# MISC
|
||||
async-compression = { version = "0.4.33", features = ["tokio", "gzip"] }
|
||||
derive_builder = "0.20.2"
|
||||
comfy-table = "7.2.1"
|
||||
typeshare = "1.0.4"
|
||||
octorust = "0.9.0"
|
||||
dashmap = "6.1.0"
|
||||
wildcard = "0.3.0"
|
||||
colored = "3.0.0"
|
||||
bytes = "1.10.1"
|
||||
regex = "1.12.2"
|
||||
regex = "1.11.1"
|
||||
bson = "2.13.0"
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("build-komodo");
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"imports": {
|
||||
"@std/toml": "jsr:@std/toml"
|
||||
}
|
||||
}
|
||||
@@ -1,4 +0,0 @@
|
||||
const cmd = "km run -y action deploy-komodo-fe-change";
|
||||
new Deno.Command("bash", {
|
||||
args: ["-c", cmd],
|
||||
}).spawn();
|
||||
@@ -1,2 +0,0 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("deploy-komodo");
|
||||
@@ -1,52 +0,0 @@
|
||||
import * as TOML from "@std/toml";
|
||||
|
||||
export const run = async (action: string) => {
|
||||
const branch = await new Deno.Command("bash", {
|
||||
args: ["-c", "git rev-parse --abbrev-ref HEAD"],
|
||||
})
|
||||
.output()
|
||||
.then((r) => new TextDecoder("utf-8").decode(r.stdout).trim());
|
||||
|
||||
const cargo_toml_str = await Deno.readTextFile("Cargo.toml");
|
||||
const prev_version = (
|
||||
TOML.parse(cargo_toml_str) as {
|
||||
workspace: { package: { version: string } };
|
||||
}
|
||||
).workspace.package.version;
|
||||
|
||||
const [version, tag, count] = prev_version.split("-");
|
||||
const next_count = Number(count) + 1;
|
||||
|
||||
const next_version = `${version}-${tag}-${next_count}`;
|
||||
|
||||
await Deno.writeTextFile(
|
||||
"Cargo.toml",
|
||||
cargo_toml_str.replace(
|
||||
`version = "${prev_version}"`,
|
||||
`version = "${next_version}"`
|
||||
)
|
||||
);
|
||||
|
||||
// Cargo check first here to make sure lock file is updated before commit.
|
||||
const cmd = `
|
||||
cargo check
|
||||
echo ""
|
||||
|
||||
git add --all
|
||||
git commit --all --message "deploy ${version}-${tag}-${next_count}"
|
||||
|
||||
echo ""
|
||||
git push
|
||||
echo ""
|
||||
|
||||
km run -y action ${action} "KOMODO_BRANCH=${branch}&KOMODO_VERSION=${version}&KOMODO_TAG=${tag}-${next_count}"
|
||||
`
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0 && !line.startsWith("//"))
|
||||
.join(" && ");
|
||||
|
||||
new Deno.Command("bash", {
|
||||
args: ["-c", cmd],
|
||||
}).spawn();
|
||||
};
|
||||
@@ -1,8 +1,7 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## Builds the Komodo Core and Periphery binaries
|
||||
## for a specific architecture.
|
||||
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
FROM rust:1.84.1-bullseye AS builder
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -11,22 +10,18 @@ COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/core ./bin/core
|
||||
COPY ./bin/periphery ./bin/periphery
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN \
|
||||
cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_periphery --release && \
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
cargo build -p komodo_periphery --release
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Periphery"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
@@ -1,36 +0,0 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
## Uses chef for dependency caching to help speed up back-to-back builds.
|
||||
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
|
||||
WORKDIR /builder
|
||||
|
||||
# Plan just the RECIPE to see if things have changed
|
||||
FROM chef AS planner
|
||||
COPY . .
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM chef AS builder
|
||||
RUN cargo install cargo-strip
|
||||
COPY --from=planner /builder/recipe.json recipe.json
|
||||
# Build JUST dependencies - cached layer
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
# NOW copy again (this time into builder) and build app
|
||||
COPY . .
|
||||
RUN \
|
||||
cargo build --release --bin core && \
|
||||
cargo build --release --bin periphery && \
|
||||
cargo build --release --bin km && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -1,39 +1,30 @@
|
||||
[package]
|
||||
name = "komodo_cli"
|
||||
description = "Command line tool for Komodo"
|
||||
description = "Command line tool to execute Komodo actions"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "km"
|
||||
name = "komodo"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
environment_file.workspace = true
|
||||
# komodo_client = "1.16.12"
|
||||
komodo_client.workspace = true
|
||||
database.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
noise.workspace = true
|
||||
# external
|
||||
futures-util.workspace = true
|
||||
comfy-table.workspace = true
|
||||
tokio-util.workspace = true
|
||||
serde_json.workspace = true
|
||||
crossterm.workspace = true
|
||||
serde_qs.workspace = true
|
||||
wildcard.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
merge_config_files.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
colored.workspace = true
|
||||
dotenvy.workspace = true
|
||||
anyhow.workspace = true
|
||||
chrono.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
clap.workspace = true
|
||||
envy.workspace = true
|
||||
@@ -1,25 +0,0 @@
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY ./lib ./lib
|
||||
COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN cargo build -p komodo_cli --release && cargo strip
|
||||
|
||||
# Copy binaries to distroless base
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=builder /builder/target/release/km /usr/local/bin/km
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -1,135 +0,0 @@
|
||||
# Copy Database Utility
|
||||
|
||||
Copy the Komodo database contents between running, mongo-compatible databases.
|
||||
Can be used to move between MongoDB / FerretDB, or upgrade from FerretDB v1 to v2.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
|
||||
copy_database:
|
||||
image: ghcr.io/moghtech/komodo-cli
|
||||
command: km database copy -y
|
||||
environment:
|
||||
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@source:27017
|
||||
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@target:27017
|
||||
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
|
||||
```
|
||||
|
||||
## FerretDB v2 Update Guide
|
||||
|
||||
Up to Komodo 1.17.5, users who wanted to use Postgres / Sqlite were instructed to deploy FerretDB v1.
|
||||
Now that v2 is out however, v1 will go largely unsupported. Users are recommended to migrate to v2 for
|
||||
the best performance and ongoing support / updates, however the internal data structures
|
||||
have changed and this cannot be done in-place.
|
||||
|
||||
Also note that FerretDB v2 no longer supports Sqlite, and only supports
|
||||
a [customized Postgres distribution](https://docs.ferretdb.io/installation/documentdb/docker/).
|
||||
Nonetheless, it remains a solid option for hosts which [do not support mongo](https://github.com/moghtech/komodo/issues/59).
|
||||
|
||||
Also note, the same basic process outlined below can also be used to move between MongoDB and FerretDB, just replace FerretDB v2
|
||||
with the database you wish to move to.
|
||||
|
||||
### **Step 1**: *Add* the new database to the top of your existing Komodo compose file.
|
||||
|
||||
**Don't forget to also add the new volumes.**
|
||||
|
||||
```yaml
|
||||
## In Komodo compose.yaml
|
||||
services:
|
||||
postgres2:
|
||||
# Recommended: Pin to a specific version
|
||||
# https://github.com/FerretDB/documentdb/pkgs/container/postgres-documentdb
|
||||
image: ghcr.io/ferretdb/postgres-documentdb
|
||||
labels:
|
||||
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
|
||||
restart: unless-stopped
|
||||
# ports:
|
||||
# - 5432:5432
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_USER: ${KOMODO_DB_USERNAME}
|
||||
POSTGRES_PASSWORD: ${KOMODO_DB_PASSWORD}
|
||||
POSTGRES_DB: postgres # Do not change
|
||||
|
||||
ferretdb2:
|
||||
# Recommended: Pin to a specific version
|
||||
# https://github.com/FerretDB/FerretDB/pkgs/container/ferretdb
|
||||
image: ghcr.io/ferretdb/ferretdb
|
||||
labels:
|
||||
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- postgres2
|
||||
# ports:
|
||||
# - 27017:27017
|
||||
volumes:
|
||||
- ferretdb-state:/state
|
||||
environment:
|
||||
FERRETDB_POSTGRESQL_URL: postgres://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@postgres2:5432/postgres
|
||||
|
||||
...(unchanged)
|
||||
|
||||
volumes:
|
||||
...(unchanged)
|
||||
postgres-data:
|
||||
ferretdb-state:
|
||||
```
|
||||
|
||||
### **Step 2**: *Add* the database copy utility to Komodo compose file.
|
||||
|
||||
The SOURCE_URI points to the existing database, ie the old FerretDB v1, and it depends
|
||||
on whether it was deployed using Postgres or Sqlite. The example below uses the Postgres one,
|
||||
but if you use Sqlite it should just be something like `mongodb://ferretdb:27017`.
|
||||
|
||||
```yaml
|
||||
## In Komodo compose.yaml
|
||||
services:
|
||||
...(new database)
|
||||
|
||||
copy_database:
|
||||
image: ghcr.io/moghtech/komodo-cli
|
||||
command: km database copy -y
|
||||
environment:
|
||||
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN
|
||||
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb2:27017
|
||||
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
|
||||
...(unchanged)
|
||||
```
|
||||
|
||||
### **Step 3**: *Compose Up* the new additions
|
||||
|
||||
Run `docker compose -p komodo --env-file compose.env -f xxxxx.compose.yaml up -d`, filling in the name of your compose.yaml.
|
||||
This will start up both the old and new database, and copy the data to the new one.
|
||||
|
||||
Wait a few moments for the `copy_database` service to finish. When it exits,
|
||||
confirm the logs show the data was moved successfully, and move on to the next step.
|
||||
|
||||
### **Step 4**: Point Komodo Core to the new database
|
||||
|
||||
In your Komodo compose.yaml, first *comment out* the `copy_database` service and old ferretdb v1 service/s.
|
||||
Then update the `core` service environment to point to `ferretdb2`.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
...
|
||||
|
||||
core:
|
||||
...(unchanged)
|
||||
environment:
|
||||
KOMODO_DATABASE_ADDRESS: ferretdb2:27017
|
||||
KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
|
||||
KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
|
||||
```
|
||||
|
||||
### **Step 5**: Final *Compose Up*
|
||||
|
||||
Repeat the same `docker compose` command as before to apply the changes, and then try navigating to your Komodo web page.
|
||||
If it works, congrats, **you are done**. You can clean up the compose file if you would like, removing the old volumes etc.
|
||||
|
||||
If it does not work, check the logs for any obvious issues, and if necessary you can undo the previous steps
|
||||
to go back to using the previous database.
|
||||
@@ -1,29 +0,0 @@
|
||||
## Assumes the latest binaries for x86_64 and aarch64 are already built (by binaries.Dockerfile).
|
||||
## Since theres no heavy build here, QEMU multi-arch builds are fine for this image.
|
||||
|
||||
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
|
||||
ARG X86_64_BINARIES=${BINARIES_IMAGE}-x86_64
|
||||
ARG AARCH64_BINARIES=${BINARIES_IMAGE}-aarch64
|
||||
|
||||
# This is required to work with COPY --from
|
||||
FROM ${X86_64_BINARIES} AS x86_64
|
||||
FROM ${AARCH64_BINARIES} AS aarch64
|
||||
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
## Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /km /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /km /app/arch/linux/arm64
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/arch
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -1,4 +0,0 @@
|
||||
[install-cli]
|
||||
alias = "ic"
|
||||
description = "installs the komodo-cli, available on the command line as 'km'"
|
||||
cmd = "cargo install --path ."
|
||||
@@ -1,18 +0,0 @@
|
||||
## Assumes the latest binaries for the required arch are already built (by binaries.Dockerfile).
|
||||
|
||||
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
|
||||
|
||||
# This is required to work with COPY --from
|
||||
FROM ${BINARIES_IMAGE} AS binaries
|
||||
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=binaries /km /usr/local/bin/km
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
55
bin/cli/src/args.rs
Normal file
55
bin/cli/src/args.rs
Normal file
@@ -0,0 +1,55 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use komodo_client::api::execute::Execution;
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
pub struct CliArgs {
|
||||
/// Sync or Exec
|
||||
#[command(subcommand)]
|
||||
pub command: Command,
|
||||
|
||||
/// The path to a creds file.
|
||||
///
|
||||
/// Note: If each of `url`, `key` and `secret` are passed,
|
||||
/// no file is required at this path.
|
||||
#[arg(long, default_value_t = default_creds())]
|
||||
pub creds: String,
|
||||
|
||||
/// Pass url in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub url: Option<String>,
|
||||
/// Pass api key in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub key: Option<String>,
|
||||
/// Pass api secret in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub secret: Option<String>,
|
||||
|
||||
/// Always continue on user confirmation prompts.
|
||||
#[arg(long, short, default_value_t = false)]
|
||||
pub yes: bool,
|
||||
}
|
||||
|
||||
fn default_creds() -> String {
|
||||
let home =
|
||||
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
|
||||
format!("{home}/.config/komodo/creds.toml")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
pub enum Command {
|
||||
/// Runs an execution
|
||||
Execute {
|
||||
#[command(subcommand)]
|
||||
execution: Execution,
|
||||
},
|
||||
// Room for more
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CredsFile {
|
||||
pub url: String,
|
||||
pub key: String,
|
||||
pub secret: String,
|
||||
}
|
||||
@@ -1,314 +0,0 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Color};
|
||||
use futures_util::{
|
||||
FutureExt, TryStreamExt, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
InspectDockerContainer, ListAllDockerContainers, ListServers,
|
||||
},
|
||||
entities::{
|
||||
config::cli::args::container::{
|
||||
Container, ContainerCommand, InspectContainer,
|
||||
},
|
||||
docker::{
|
||||
self,
|
||||
container::{ContainerListItem, ContainerStateStatusEnum},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
command::{
|
||||
PrintTable, clamp_sha, matches_wildcards, parse_wildcards,
|
||||
print_items,
|
||||
},
|
||||
config::cli_config,
|
||||
};
|
||||
|
||||
pub async fn handle(container: &Container) -> anyhow::Result<()> {
|
||||
match &container.command {
|
||||
None => list_containers(container).await,
|
||||
Some(ContainerCommand::Inspect(inspect)) => {
|
||||
inspect_container(inspect).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_containers(
|
||||
Container {
|
||||
all,
|
||||
down,
|
||||
links,
|
||||
reverse,
|
||||
containers: names,
|
||||
images,
|
||||
networks,
|
||||
servers,
|
||||
format,
|
||||
command: _,
|
||||
}: &Container,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = super::komodo_client().await?;
|
||||
let (server_map, containers) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers::default())
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default(),
|
||||
containers: Default::default(),
|
||||
}),
|
||||
)?;
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
let containers = containers.into_iter().map(|c| {
|
||||
let server = if let Some(server_id) = c.server_id.as_ref()
|
||||
&& let Some(server) = server_map.get(server_id)
|
||||
{
|
||||
server
|
||||
} else {
|
||||
return (None, c);
|
||||
};
|
||||
(Some(server.name.as_str()), c)
|
||||
});
|
||||
|
||||
let names = parse_wildcards(names);
|
||||
let servers = parse_wildcards(servers);
|
||||
let images = parse_wildcards(images);
|
||||
let networks = parse_wildcards(networks);
|
||||
|
||||
let mut containers = containers
|
||||
.into_iter()
|
||||
.filter(|(server_name, c)| {
|
||||
let state_check = if *all {
|
||||
true
|
||||
} else if *down {
|
||||
!matches!(c.state, ContainerStateStatusEnum::Running)
|
||||
} else {
|
||||
matches!(c.state, ContainerStateStatusEnum::Running)
|
||||
};
|
||||
let network_check = matches_wildcards(
|
||||
&networks,
|
||||
&c.network_mode
|
||||
.as_deref()
|
||||
.map(|n| vec![n])
|
||||
.unwrap_or_default(),
|
||||
) || matches_wildcards(
|
||||
&networks,
|
||||
&c.networks.iter().map(String::as_str).collect::<Vec<_>>(),
|
||||
);
|
||||
state_check
|
||||
&& network_check
|
||||
&& matches_wildcards(&names, &[c.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&server_name
|
||||
.as_deref()
|
||||
.map(|i| vec![i])
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
&& matches_wildcards(
|
||||
&images,
|
||||
&c.image.as_deref().map(|i| vec![i]).unwrap_or_default(),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
containers.sort_by(|(a_s, a), (b_s, b)| {
|
||||
a.state
|
||||
.cmp(&b.state)
|
||||
.then(a.name.cmp(&b.name))
|
||||
.then(a_s.cmp(b_s))
|
||||
.then(a.network_mode.cmp(&b.network_mode))
|
||||
.then(a.image.cmp(&b.image))
|
||||
});
|
||||
if *reverse {
|
||||
containers.reverse();
|
||||
}
|
||||
print_items(containers, *format, *links)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn inspect_container(
|
||||
inspect: &InspectContainer,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = super::komodo_client().await?;
|
||||
let (server_map, mut containers) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers::default())
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default(),
|
||||
containers: Default::default()
|
||||
}),
|
||||
)?;
|
||||
|
||||
containers.iter_mut().for_each(|c| {
|
||||
let Some(server_id) = c.server_id.as_ref() else {
|
||||
return;
|
||||
};
|
||||
let Some(server) = server_map.get(server_id) else {
|
||||
c.server_id = Some(String::from("Unknown"));
|
||||
return;
|
||||
};
|
||||
c.server_id = Some(server.name.clone());
|
||||
});
|
||||
|
||||
let names = [inspect.container.to_string()];
|
||||
let names = parse_wildcards(&names);
|
||||
let servers = parse_wildcards(&inspect.servers);
|
||||
|
||||
let mut containers = containers
|
||||
.into_iter()
|
||||
.filter(|c| {
|
||||
matches_wildcards(&names, &[c.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&c.server_id
|
||||
.as_deref()
|
||||
.map(|i| vec![i])
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
})
|
||||
.map(|c| async move {
|
||||
client
|
||||
.read(InspectDockerContainer {
|
||||
container: c.name,
|
||||
server: c.server_id.context("No server...")?,
|
||||
})
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
|
||||
containers.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
match containers.len() {
|
||||
0 => {
|
||||
println!(
|
||||
"{}: Did not find any containers matching '{}'",
|
||||
"INFO".green(),
|
||||
inspect.container.bold()
|
||||
);
|
||||
}
|
||||
1 => {
|
||||
println!("{}", serialize_container(inspect, &containers[0])?);
|
||||
}
|
||||
_ => {
|
||||
let containers = containers
|
||||
.iter()
|
||||
.map(|c| serialize_container(inspect, c))
|
||||
.collect::<anyhow::Result<Vec<_>>>()?
|
||||
.join("\n");
|
||||
println!("{containers}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_container(
|
||||
inspect: &InspectContainer,
|
||||
container: &docker::container::Container,
|
||||
) -> anyhow::Result<String> {
|
||||
let res = if inspect.state {
|
||||
serde_json::to_string_pretty(&container.state)
|
||||
} else if inspect.mounts {
|
||||
serde_json::to_string_pretty(&container.mounts)
|
||||
} else if inspect.host_config {
|
||||
serde_json::to_string_pretty(&container.host_config)
|
||||
} else if inspect.config {
|
||||
serde_json::to_string_pretty(&container.config)
|
||||
} else if inspect.network_settings {
|
||||
serde_json::to_string_pretty(&container.network_settings)
|
||||
} else {
|
||||
serde_json::to_string_pretty(container)
|
||||
}
|
||||
.context("Failed to serialize items to JSON")?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&[
|
||||
"Container",
|
||||
"State",
|
||||
"Server",
|
||||
"Ports",
|
||||
"Networks",
|
||||
"Image",
|
||||
"Link",
|
||||
]
|
||||
} else {
|
||||
&["Container", "State", "Server", "Ports", "Networks", "Image"]
|
||||
}
|
||||
}
|
||||
fn row(self, links: bool) -> Vec<Cell> {
|
||||
let color = match self.1.state {
|
||||
ContainerStateStatusEnum::Running => Color::Green,
|
||||
ContainerStateStatusEnum::Paused => Color::DarkYellow,
|
||||
ContainerStateStatusEnum::Empty => Color::Grey,
|
||||
_ => Color::Red,
|
||||
};
|
||||
let mut networks = HashSet::new();
|
||||
if let Some(network) = self.1.network_mode {
|
||||
networks.insert(network);
|
||||
}
|
||||
for network in self.1.networks {
|
||||
networks.insert(network);
|
||||
}
|
||||
let mut networks = networks.into_iter().collect::<Vec<_>>();
|
||||
networks.sort();
|
||||
let mut ports = self
|
||||
.1
|
||||
.ports
|
||||
.into_iter()
|
||||
.flat_map(|p| p.public_port.map(|p| p.to_string()))
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
ports.sort();
|
||||
let ports = if ports.is_empty() {
|
||||
Cell::new("")
|
||||
} else {
|
||||
Cell::new(format!(":{}", ports.join(", :")))
|
||||
};
|
||||
|
||||
let image = self.1.image.as_deref().unwrap_or("Unknown");
|
||||
let mut res = vec![
|
||||
Cell::new(self.1.name.clone()).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.1.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.0.unwrap_or("Unknown")),
|
||||
ports,
|
||||
Cell::new(networks.join(", ")),
|
||||
Cell::new(clamp_sha(image)),
|
||||
];
|
||||
if !links {
|
||||
return res;
|
||||
}
|
||||
let link = if let Some(server_id) = self.1.server_id {
|
||||
format!(
|
||||
"{}/servers/{server_id}/container/{}",
|
||||
cli_config().host,
|
||||
self.1.name
|
||||
)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
res.push(Cell::new(link));
|
||||
res
|
||||
}
|
||||
}
|
||||
@@ -1,366 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::entities::{
|
||||
config::cli::args::database::DatabaseCommand, optional_string,
|
||||
};
|
||||
|
||||
use crate::{command::sanitize_uri, config::cli_config};
|
||||
|
||||
pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
|
||||
match command {
|
||||
DatabaseCommand::Backup { yes, .. } => backup(*yes).await,
|
||||
DatabaseCommand::Restore {
|
||||
restore_folder,
|
||||
index,
|
||||
yes,
|
||||
..
|
||||
} => restore(restore_folder.as_deref(), *index, *yes).await,
|
||||
DatabaseCommand::Prune { yes, .. } => prune(*yes).await,
|
||||
DatabaseCommand::Copy { yes, index, .. } => {
|
||||
copy(*index, *yes).await
|
||||
}
|
||||
DatabaseCommand::V1Downgrade { yes } => v1_downgrade(*yes).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn backup(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Backup".green().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Backup all database contents to gzip compressed files."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Source Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Source Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Source Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
println!(
|
||||
"{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if config.max_backups == 0 {
|
||||
println!(
|
||||
"{}{}",
|
||||
" - Backup pruning".dimmed(),
|
||||
"disabled".red().dimmed()
|
||||
);
|
||||
} else {
|
||||
println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start backup", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
database::utils::backup(&db, &config.backups_folder).await?;
|
||||
|
||||
// Early return if backup pruning disabled
|
||||
if config.max_backups == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Know that new backup was taken successfully at this point,
|
||||
// safe to prune old backup folders
|
||||
|
||||
prune_inner().await
|
||||
}
|
||||
|
||||
async fn restore(
|
||||
restore_folder: Option<&Path>,
|
||||
index: bool,
|
||||
yes: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Restore".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Restores database contents from gzip compressed files."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database_target.uri) {
|
||||
println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) =
|
||||
optional_string(&config.database_target.address)
|
||||
{
|
||||
println!("{}: {address}", " - Target Address".dimmed());
|
||||
}
|
||||
if let Some(username) =
|
||||
optional_string(&config.database_target.username)
|
||||
{
|
||||
println!("{}: {username}", " - Target Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Name".dimmed(),
|
||||
config.database_target.db_name,
|
||||
);
|
||||
if !index {
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Indexing".dimmed(),
|
||||
"DISABLED".red(),
|
||||
);
|
||||
}
|
||||
println!(
|
||||
"\n{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if let Some(restore_folder) = restore_folder {
|
||||
println!("{}: {restore_folder:?}", " - Restore Folder".dimmed());
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start restore", yes)?;
|
||||
|
||||
let db = if index {
|
||||
database::Client::new(&config.database_target).await?.db
|
||||
} else {
|
||||
database::init(&config.database_target).await?
|
||||
};
|
||||
|
||||
database::utils::restore(
|
||||
&db,
|
||||
&config.backups_folder,
|
||||
restore_folder,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn prune(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Backup Prune".cyan().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Prunes database backup folders when greater than the configured amount."
|
||||
.dimmed()
|
||||
);
|
||||
println!(
|
||||
"{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if config.max_backups == 0 {
|
||||
println!(
|
||||
"{}{}",
|
||||
" - Backup pruning".dimmed(),
|
||||
"disabled".red().dimmed()
|
||||
);
|
||||
} else {
|
||||
println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
|
||||
}
|
||||
|
||||
// Early return if backup pruning disabled
|
||||
if config.max_backups == 0 {
|
||||
info!(
|
||||
"Backup pruning is disabled, enabled using 'max_backups' (KOMODO_CLI_MAX_BACKUPS)"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start backup prune", yes)?;
|
||||
|
||||
prune_inner().await
|
||||
}
|
||||
|
||||
async fn prune_inner() -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
let mut backups_dir =
|
||||
match tokio::fs::read_dir(&config.backups_folder)
|
||||
.await
|
||||
.context("Failed to read backups folder for prune")
|
||||
{
|
||||
Ok(backups_dir) => backups_dir,
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let mut backup_folders = Vec::new();
|
||||
loop {
|
||||
match backups_dir.next_entry().await {
|
||||
Ok(Some(entry)) => {
|
||||
let Ok(metadata) = entry.metadata().await else {
|
||||
continue;
|
||||
};
|
||||
if metadata.is_dir() {
|
||||
backup_folders.push(entry.path());
|
||||
}
|
||||
}
|
||||
Ok(None) => break,
|
||||
Err(_) => {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ordered from oldest -> newest
|
||||
backup_folders.sort();
|
||||
|
||||
let max_backups = config.max_backups as usize;
|
||||
let backup_folders_len = backup_folders.len();
|
||||
|
||||
// Early return if under the backup count threshold
|
||||
if backup_folders_len <= max_backups {
|
||||
info!("No backups to prune");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let to_delete =
|
||||
&backup_folders[..(backup_folders_len - max_backups)];
|
||||
|
||||
info!("Pruning old backups: {to_delete:?}");
|
||||
|
||||
for path in to_delete {
|
||||
if let Err(e) =
|
||||
tokio::fs::remove_dir_all(path).await.with_context(|| {
|
||||
format!("Failed to delete backup folder at {path:?}")
|
||||
})
|
||||
{
|
||||
warn!("{e:#}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Copy".blue().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Copies database contents to another database.".dimmed()
|
||||
);
|
||||
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Source Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Source Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Source Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
if let Some(uri) = optional_string(&config.database_target.uri) {
|
||||
println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) =
|
||||
optional_string(&config.database_target.address)
|
||||
{
|
||||
println!("{}: {address}", " - Target Address".dimmed());
|
||||
}
|
||||
if let Some(username) =
|
||||
optional_string(&config.database_target.username)
|
||||
{
|
||||
println!("{}: {username}", " - Target Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Name".dimmed(),
|
||||
config.database_target.db_name,
|
||||
);
|
||||
if !index {
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Indexing".dimmed(),
|
||||
"DISABLED".red(),
|
||||
);
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start copy", yes)?;
|
||||
|
||||
let source_db = database::init(&config.database).await?;
|
||||
let target_db = if index {
|
||||
database::Client::new(&config.database_target).await?.db
|
||||
} else {
|
||||
database::init(&config.database_target).await?
|
||||
};
|
||||
|
||||
database::utils::copy(&source_db, &target_db).await
|
||||
}
|
||||
|
||||
async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} 🦎",
|
||||
"Komodo".bold(),
|
||||
"V1 Downgrade".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Downgrade the database to V1 compatible data structures."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
crate::command::wait_for_enter("run downgrade", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
db.collection::<Document>("Server")
|
||||
.update_many(doc! {}, doc! { "$set": { "info": null } })
|
||||
.await
|
||||
.context("Failed to downgrade Server schema")?;
|
||||
|
||||
info!(
|
||||
"V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,586 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use futures_util::{StreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchExecutionResponseItem, Execution,
|
||||
},
|
||||
entities::{resource_link, update::Update},
|
||||
};
|
||||
|
||||
use crate::config::cli_config;
|
||||
|
||||
enum ExecutionResult {
|
||||
Single(Box<Update>),
|
||||
Batch(BatchExecutionResponse),
|
||||
}
|
||||
|
||||
pub async fn handle(
|
||||
execution: &Execution,
|
||||
yes: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if matches!(execution, Execution::None(_)) {
|
||||
println!("Got 'none' execution. Doing nothing...");
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
println!("Finished doing nothing. Exiting...");
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
println!("\n{}: Execution", "Mode".dimmed());
|
||||
match execution {
|
||||
Execution::None(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Deploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchCloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchPullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchBuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelRepoBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteNetwork(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteImage(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteVolume(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneVolumes(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneDockerBuilders(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneBuildx(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneSystem(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CommitSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchPullStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunStackService(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::SendAlert(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::ClearRepoCache(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BackupCoreDatabase(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::GlobalAutoUpdate(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateAllServerKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateCoreKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
}
|
||||
|
||||
super::wait_for_enter("run execution", yes)?;
|
||||
|
||||
info!("Running Execution...");
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let res = match execution.clone() {
|
||||
Execution::RunAction(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunAction(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunProcedure(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunProcedure(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunBuild(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CancelBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Deploy(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeploy(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDestroyDeployment(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CloneRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchCloneRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchPullRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::BuildRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchBuildRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CancelRepoBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteNetwork(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneNetworks(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteImage(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneImages(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteVolume(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneVolumes(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneDockerBuilders(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneBuildx(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneSystem(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RunSync(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::CommitSync(request) => client
|
||||
.write(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeployStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeployStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::DeployStackIfChanged(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeployStackIfChanged(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchPullStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::StartStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDestroyStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunStackService(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::TestAlerter(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::SendAlert(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::ClearRepoCache(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BackupCoreDatabase(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::GlobalAutoUpdate(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateAllServerKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateCoreKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
println!("Finished sleeping!");
|
||||
std::process::exit(0)
|
||||
}
|
||||
Execution::None(_) => unreachable!(),
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(ExecutionResult::Single(update)) => {
|
||||
poll_update_until_complete(&update).await
|
||||
}
|
||||
Ok(ExecutionResult::Batch(updates)) => {
|
||||
let mut handles = updates
|
||||
.iter()
|
||||
.map(|update| async move {
|
||||
match update {
|
||||
BatchExecutionResponseItem::Ok(update) => {
|
||||
poll_update_until_complete(update).await
|
||||
}
|
||||
BatchExecutionResponseItem::Err(e) => {
|
||||
error!("{e:#?}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
while let Some(res) = handles.next().await {
|
||||
match res {
|
||||
Ok(()) => {}
|
||||
Err(e) => {
|
||||
error!("{e:#?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("{e:#?}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn poll_update_until_complete(
|
||||
update: &Update,
|
||||
) -> anyhow::Result<()> {
|
||||
let link = if update.id.is_empty() {
|
||||
let (resource_type, id) = update.target.extract_variant_id();
|
||||
resource_link(&cli_config().host, resource_type, id)
|
||||
} else {
|
||||
format!("{}/updates/{}", cli_config().host, update.id)
|
||||
};
|
||||
println!("Link: '{}'", link.bold());
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let timer = tokio::time::Instant::now();
|
||||
let update = client.poll_update_until_complete(&update.id).await?;
|
||||
if update.success {
|
||||
println!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION SUCCESSFUL".green(),
|
||||
);
|
||||
} else {
|
||||
eprintln!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION FAILED".red(),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,182 +0,0 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use chrono::TimeZone;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Table};
|
||||
use komodo_client::{
|
||||
KomodoClient,
|
||||
entities::config::cli::{CliTableBorders, args::CliFormat},
|
||||
};
|
||||
use serde::Serialize;
|
||||
use tokio::sync::OnceCell;
|
||||
use wildcard::Wildcard;
|
||||
|
||||
use crate::config::cli_config;
|
||||
|
||||
pub mod container;
|
||||
pub mod database;
|
||||
pub mod execute;
|
||||
pub mod list;
|
||||
pub mod terminal;
|
||||
pub mod update;
|
||||
|
||||
/// Lazily initialize and cache the global [`KomodoClient`].
///
/// The first caller builds the client from `cli_config()` and runs a
/// healthcheck against the host; subsequent callers get the cached
/// reference. Errors if `cli_key`/`cli_secret` are not both set, or
/// if the healthcheck fails.
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {
  static KOMODO_CLIENT: OnceCell<KomodoClient> =
    OnceCell::const_new();
  KOMODO_CLIENT
    // get_or_try_init: a failed init is not cached, so a later call
    // can retry (e.g. after fixing credentials in the environment).
    .get_or_try_init(|| async {
      let config = cli_config();
      let (Some(key), Some(secret)) =
        (&config.cli_key, &config.cli_secret)
      else {
        return Err(anyhow!(
          "Must provide both cli_key and cli_secret"
        ));
      };
      KomodoClient::new(&config.host, key, secret)
        .with_healthcheck()
        .await
    })
    .await
}
|
||||
|
||||
fn wait_for_enter(
|
||||
press_enter_to: &str,
|
||||
skip: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if skip {
|
||||
println!();
|
||||
return Ok(());
|
||||
}
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sanitizes uris of the form:
/// `protocol://username:password@address`
///
/// The password (or, lacking a `:` separator, the whole credential
/// token) is replaced with `*****`. Uris without a scheme or without
/// an `@` credential section are returned unchanged.
fn sanitize_uri(uri: &str) -> String {
  match uri.split_once("://") {
    Some((protocol, credentials_address)) => {
      match credentials_address.split_once('@') {
        Some((credentials, address)) => {
          match credentials.split_once(':') {
            // `username:password` -> keep username, mask password.
            Some((username, _)) => {
              format!("{protocol}://{username}:*****@{address}")
            }
            // Bare credential token: mask it entirely.
            None => format!("{protocol}://*****@{address}"),
          }
        }
        // No credentials, return as-is
        None => uri.to_string(),
      }
    }
    // No protocol, return as-is
    None => uri.to_string(),
  }
}
|
||||
|
||||
/// Render `items` to stdout either as a comfy_table `Table` or as
/// pretty-printed JSON, depending on `format`.
///
/// `links` controls whether a link column is included (forwarded to
/// the [`PrintTable`] impl for both header and rows).
fn print_items<T: PrintTable + Serialize>(
  items: Vec<T>,
  format: CliFormat,
  links: bool,
) -> anyhow::Result<()> {
  match format {
    CliFormat::Table => {
      let mut table = Table::new();
      // Map the configured border style to a comfy_table preset.
      // `None` defaults to horizontal-only borders.
      let preset = {
        use comfy_table::presets::*;
        match cli_config().table_borders {
          None | Some(CliTableBorders::Horizontal) => {
            UTF8_HORIZONTAL_ONLY
          }
          Some(CliTableBorders::Vertical) => UTF8_FULL_CONDENSED,
          Some(CliTableBorders::Inside) => UTF8_NO_BORDERS,
          Some(CliTableBorders::Outside) => UTF8_BORDERS_ONLY,
          Some(CliTableBorders::All) => UTF8_FULL,
        }
      };
      table.load_preset(preset).set_header(
        T::header(links)
          .iter()
          .map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
      );
      for item in items {
        table.add_row(item.row(links));
      }
      println!("{table}");
    }
    CliFormat::Json => {
      println!(
        "{}",
        serde_json::to_string_pretty(&items)
          .context("Failed to serialize items to JSON")?
      );
    }
  }
  Ok(())
}
|
||||
|
||||
/// Types renderable as a row in the CLI's table output.
/// `links` toggles inclusion of a link column in both the header
/// and each row.
trait PrintTable {
  // Column headers for this type.
  fn header(links: bool) -> &'static [&'static str];
  // Consumes the item, producing one table row of cells.
  fn row(self, links: bool) -> Vec<Cell>;
}
|
||||
|
||||
fn parse_wildcards(items: &[String]) -> Vec<Wildcard<'_>> {
|
||||
items
|
||||
.iter()
|
||||
.flat_map(|i| {
|
||||
Wildcard::new(i.as_bytes()).inspect_err(|e| {
|
||||
warn!("Failed to parse wildcard: {i} | {e:?}")
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn matches_wildcards(
|
||||
wildcards: &[Wildcard<'_>],
|
||||
items: &[&str],
|
||||
) -> bool {
|
||||
if wildcards.is_empty() {
|
||||
return true;
|
||||
}
|
||||
items.iter().any(|item| {
|
||||
wildcards.iter().any(|wc| wc.is_match(item.as_bytes()))
|
||||
})
|
||||
}
|
||||
|
||||
/// Format a unix timestamp (milliseconds) as a local-time
/// `MM/DD HH:MM:SS` string.
///
/// NOTE(review): name has a typo ("timetamp"); kept as-is since
/// renaming would break callers elsewhere in the crate.
fn format_timetamp(ts: i64) -> anyhow::Result<String> {
  let ts = chrono::Local
    .timestamp_millis_opt(ts)
    // `single()` yields None for out-of-range or ambiguous local
    // times (e.g. across a DST transition).
    .single()
    .context("Invalid ts")?
    .format("%m/%d %H:%M:%S")
    .to_string();
  Ok(ts)
}
|
||||
|
||||
/// Shorten a `sha256:<digest>` reference for display: keep the first
/// 20 characters followed by `...`. Anything not starting with
/// `sha256:` (e.g. a plain tag) is returned unchanged.
fn clamp_sha(maybe_sha: &str) -> String {
  if maybe_sha.starts_with("sha256:") {
    // Checked slicing: the previous `maybe_sha[0..20]` panicked on a
    // truncated digest shorter than 20 bytes (e.g. just "sha256:").
    match maybe_sha.get(..20) {
      Some(prefix) => prefix.to_string() + "...",
      None => maybe_sha.to_string(),
    }
  } else {
    maybe_sha.to_string()
  }
}
|
||||
|
||||
// fn text_link(link: &str, text: &str) -> String {
|
||||
// format!("\x1b]8;;{link}\x07{text}\x1b]8;;\x07")
|
||||
// }
|
||||
@@ -1,334 +0,0 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::{
|
||||
read::{ListAllDockerContainers, ListServers},
|
||||
terminal::InitTerminal,
|
||||
},
|
||||
entities::{
|
||||
config::cli::args::terminal::{Attach, Connect, Exec},
|
||||
server::ServerQuery,
|
||||
terminal::{
|
||||
ContainerTerminalMode, TerminalRecreateMode,
|
||||
TerminalResizeMessage, TerminalStdinMessage,
|
||||
},
|
||||
},
|
||||
ws::terminal::TerminalWebsocket,
|
||||
};
|
||||
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
/// `km connect`: open an interactive terminal session on a Komodo
/// Server (not a container) and forward the local tty to it.
///
/// `--recreate` forces a fresh terminal; otherwise an existing one is
/// only recreated when its command differs.
pub async fn handle_connect(
  Connect {
    server,
    name,
    command,
    recreate,
  }: &Connect,
) -> anyhow::Result<()> {
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_server_terminal(
        server.to_string(),
        Some(name.to_string()),
        Some(InitTerminal {
          command: command.clone(),
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          // Server terminals have no container mode.
          mode: None,
        }),
      )
      .await
  })
  .await
}
|
||||
|
||||
/// `km exec`: run `shell` inside a container (docker-exec style) and
/// forward the local tty to it.
///
/// When `--server` is omitted, the server is resolved by searching
/// all servers for a uniquely-named matching container.
pub async fn handle_exec(
  Exec {
    server,
    container,
    shell,
    recreate,
  }: &Exec,
) -> anyhow::Result<()> {
  let server = get_server(server.clone(), container).await?;
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_container_terminal(
        server,
        container.to_string(),
        None,
        Some(InitTerminal {
          command: Some(shell.to_string()),
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          // Exec mode: spawn a new process in the container.
          mode: Some(ContainerTerminalMode::Exec),
        }),
      )
      .await
  })
  .await
}
|
||||
|
||||
/// `km attach`: attach to a container's main process (docker-attach
/// style) and forward the local tty to it.
///
/// When `--server` is omitted, the server is resolved by searching
/// all servers for a uniquely-named matching container.
pub async fn handle_attach(
  Attach {
    server,
    container,
    recreate,
  }: &Attach,
) -> anyhow::Result<()> {
  let server = get_server(server.clone(), container).await?;
  handle_terminal_forwarding(async {
    super::komodo_client()
      .await?
      .connect_container_terminal(
        server,
        container.to_string(),
        None,
        Some(InitTerminal {
          // Attach mode connects to the existing main process,
          // so no command is provided.
          command: None,
          recreate: if *recreate {
            TerminalRecreateMode::Always
          } else {
            TerminalRecreateMode::DifferentCommand
          },
          mode: Some(ContainerTerminalMode::Attach),
        }),
      )
      .await
  })
  .await
}
|
||||
|
||||
/// Resolve which server hosts `container`.
///
/// If the caller already specified a server, it is returned directly.
/// Otherwise, all servers are searched for containers matching the
/// name; exactly one match resolves to its server id, while zero or
/// multiple matches produce a descriptive error.
async fn get_server(
  server: Option<String>,
  container: &str,
) -> anyhow::Result<String> {
  if let Some(server) = server {
    return Ok(server);
  }

  let client = super::komodo_client().await?;

  let mut containers = client
    .read(ListAllDockerContainers {
      // Empty server filter => search every server.
      servers: Default::default(),
      containers: vec![container.to_string()],
    })
    .await?;

  if containers.is_empty() {
    return Err(anyhow!(
      "Did not find any container matching {container}"
    ));
  }

  if containers.len() == 1 {
    return containers
      .pop()
      .context("Shouldn't happen")?
      .server_id
      .context("Container doesn't have server_id");
  }

  // Ambiguous: collect the hosting servers so the error message can
  // tell the user which `--server` values would disambiguate.
  let servers = containers
    .into_iter()
    .flat_map(|container| container.server_id)
    .collect::<Vec<_>>();

  // NOTE(review): `server_id` values are fed into `names(...)` here —
  // presumably ServerQuery resolves ids through the names filter too;
  // confirm against the query implementation.
  let servers = client
    .read(ListServers {
      query: ServerQuery::builder().names(servers).build(),
    })
    .await?
    .into_iter()
    .map(|server| format!("\t- {}", server.name.bold()))
    .collect::<Vec<_>>()
    .join("\n");

  Err(anyhow!(
    "Multiple containers matching '{}' on Servers:\n{servers}",
    container.bold(),
  ))
}
|
||||
|
||||
/// Bridge the local tty to a remote terminal websocket until either
/// side ends the session.
///
/// Four concurrent tasks are joined, all sharing one
/// `CancellationToken`: SIGWINCH -> resize messages, stdin -> stdin
/// messages, the mpsc channel -> websocket writes, and websocket
/// reads -> stdout. Whichever task finishes first cancels the token,
/// unwinding the other three. The terminal is put into raw mode for
/// the duration of the forwarding and restored via `RawModeGuard`.
/// Unix-only (SIGWINCH).
async fn handle_terminal_forwarding<
  C: Future<Output = anyhow::Result<TerminalWebsocket>>,
>(
  connect: C,
) -> anyhow::Result<()> {
  // Need to forward multiple sources into ws write
  let (write_tx, mut write_rx) =
    tokio::sync::mpsc::channel::<TerminalStdinMessage>(1024);

  // ================
  //  SETUP RESIZING
  // ================

  // Subscribe to SIGWINCH for resize messages
  let mut sigwinch = tokio::signal::unix::signal(
    tokio::signal::unix::SignalKind::window_change(),
  )
  .context("failed to register SIGWINCH handler")?;

  // Send first resize messsage, bailing if it fails to get the size.
  write_tx.send(resize_message()?).await?;

  let cancel = CancellationToken::new();

  let forward_resize = async {
    while future_or_cancel(sigwinch.recv(), &cancel)
      .await
      .flatten()
      .is_some()
    {
      // A failed size read is skipped silently; a closed channel
      // means the writer side is gone, so stop.
      if let Ok(resize_message) = resize_message()
        && write_tx.send(resize_message).await.is_err()
      {
        break;
      }
    }
    cancel.cancel();
  };

  let forward_stdin = async {
    let mut stdin = tokio::io::stdin();
    let mut buf = [0u8; 8192];
    while let Some(Ok(n)) =
      future_or_cancel(stdin.read(&mut buf), &cancel).await
    {
      // EOF
      if n == 0 {
        break;
      }
      let bytes = &buf[..n];
      // Check for disconnect sequence (alt + q)
      if bytes == [197, 147] {
        break;
      }
      // Forward bytes
      if write_tx
        .send(TerminalStdinMessage::Forward(bytes.to_vec()))
        .await
        .is_err()
      {
        break;
      };
    }
    cancel.cancel();
  };

  // =====================
  //  CONNECT AND FORWARD
  // =====================

  let (mut ws_write, mut ws_read) = connect.await?.split();

  // Drain the mpsc channel into the websocket; the first send error
  // is surfaced after the join.
  let forward_write = async {
    while let Some(message) =
      future_or_cancel(write_rx.recv(), &cancel).await.flatten()
    {
      if let Err(e) = ws_write.send_stdin_message(message).await {
        cancel.cancel();
        return Some(e);
      };
    }
    cancel.cancel();
    None
  };

  // Copy remote stdout bytes to the local stdout, flushing each
  // chunk so interactive output appears immediately.
  let forward_read = async {
    let mut stdout = tokio::io::stdout();
    while let Some(msg) =
      future_or_cancel(ws_read.receive_stdout(), &cancel).await
    {
      let bytes = match msg {
        Ok(Some(bytes)) => bytes,
        // Clean remote close.
        Ok(None) => break,
        Err(e) => {
          cancel.cancel();
          return Some(e.context("Websocket read error"));
        }
      };
      if let Err(e) = stdout
        .write_all(&bytes)
        .await
        .context("Failed to write text to stdout")
      {
        cancel.cancel();
        return Some(e);
      }
      let _ = stdout.flush().await;
    }
    cancel.cancel();
    None
  };

  // Raw mode only for the duration of the forwarding loops.
  let guard = RawModeGuard::enable_raw_mode()?;

  let (_, _, write_error, read_error) = tokio::join!(
    forward_resize,
    forward_stdin,
    forward_write,
    forward_read
  );

  // Restore the terminal before printing any final messages.
  drop(guard);

  if let Some(e) = write_error {
    eprintln!("\nFailed to forward stdin | {e:#}");
  }

  if let Some(e) = read_error {
    eprintln!("\nFailed to forward stdout | {e:#}");
  }

  println!("\n\n{} {}", "connection".bold(), "closed".red().bold());

  // It doesn't seem to exit by itself after the raw mode stuff.
  std::process::exit(0)
}
|
||||
|
||||
fn resize_message() -> anyhow::Result<TerminalStdinMessage> {
|
||||
let (cols, rows) = crossterm::terminal::size()
|
||||
.context("Failed to get terminal size")?;
|
||||
Ok(TerminalStdinMessage::Resize(TerminalResizeMessage {
|
||||
rows,
|
||||
cols,
|
||||
}))
|
||||
}
|
||||
|
||||
/// RAII guard for terminal raw mode: enabled on construction,
/// disabled again on Drop (even on early return / unwind).
struct RawModeGuard;

impl RawModeGuard {
  // Enable raw mode, returning the guard that will restore it.
  fn enable_raw_mode() -> anyhow::Result<Self> {
    crossterm::terminal::enable_raw_mode()
      .context("Failed to enable terminal raw mode")?;
    Ok(Self)
  }
}
impl Drop for RawModeGuard {
  fn drop(&mut self) {
    // Can't propagate from Drop, so just report the failure.
    if let Err(e) = crossterm::terminal::disable_raw_mode() {
      eprintln!("Failed to disable terminal raw mode | {e:?}");
    }
  }
}
|
||||
|
||||
/// Await `fut`, but resolve to `None` as soon as `cancel` fires.
/// Used to make the forwarding loops cooperatively cancellable.
async fn future_or_cancel<T, F: Future<Output = T>>(
  fut: F,
  cancel: &CancellationToken,
) -> Option<T> {
  tokio::select! {
    res = fut => Some(res),
    _ = cancel.cancelled() => None
  }
}
|
||||
@@ -1,43 +0,0 @@
|
||||
use komodo_client::entities::{
|
||||
build::PartialBuildConfig,
|
||||
config::cli::args::update::UpdateCommand,
|
||||
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
|
||||
server::PartialServerConfig, stack::PartialStackConfig,
|
||||
sync::PartialResourceSyncConfig,
|
||||
};
|
||||
|
||||
mod resource;
|
||||
mod user;
|
||||
mod variable;
|
||||
|
||||
/// Dispatch a `km update ...` subcommand to the matching handler.
/// Resource variants share the generic `resource::update::<T>` path,
/// parameterized by the resource's partial-config type.
pub async fn handle(command: &UpdateCommand) -> anyhow::Result<()> {
  match command {
    UpdateCommand::Build(update) => {
      resource::update::<PartialBuildConfig>(update).await
    }
    UpdateCommand::Deployment(update) => {
      resource::update::<PartialDeploymentConfig>(update).await
    }
    UpdateCommand::Repo(update) => {
      resource::update::<PartialRepoConfig>(update).await
    }
    UpdateCommand::Server(update) => {
      resource::update::<PartialServerConfig>(update).await
    }
    UpdateCommand::Stack(update) => {
      resource::update::<PartialStackConfig>(update).await
    }
    UpdateCommand::Sync(update) => {
      resource::update::<PartialResourceSyncConfig>(update).await
    }
    UpdateCommand::Variable {
      name,
      value,
      secret,
      yes,
    } => variable::update(name, value, *secret, *yes).await,
    UpdateCommand::User { username, command } => {
      user::update(username, command).await
    }
  }
}
|
||||
@@ -1,152 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
UpdateBuild, UpdateDeployment, UpdateRepo, UpdateResourceSync,
|
||||
UpdateServer, UpdateStack,
|
||||
},
|
||||
entities::{
|
||||
build::PartialBuildConfig,
|
||||
config::cli::args::update::UpdateResource,
|
||||
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
|
||||
server::PartialServerConfig, stack::PartialStackConfig,
|
||||
sync::PartialResourceSyncConfig,
|
||||
},
|
||||
};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
|
||||
/// Generic `km update <resource>` flow: parse the querystring-style
/// `update` argument into the partial config type `T`, show the user
/// what will change, confirm, then apply it via `T::apply`.
pub async fn update<
  T: std::fmt::Debug + Serialize + DeserializeOwned + ResourceUpdate,
>(
  UpdateResource {
    resource,
    update,
    yes,
  }: &UpdateResource,
) -> anyhow::Result<()> {
  println!("\n{}: Update {}\n", "Mode".dimmed(), T::resource_type());
  println!(" - {}: {resource}", "Name".dimmed());

  // The update is given as a querystring, eg `field=value&other=2`.
  let config = serde_qs::from_str::<T>(update)
    .context("Failed to deserialize config")?;

  // Prefer pretty JSON for the preview; fall back to Debug if the
  // config fails to serialize.
  match serde_json::to_string_pretty(&config) {
    Ok(config) => {
      println!(" - {}: {config}", "Update".dimmed());
    }
    Err(_) => {
      println!(" - {}: {config:#?}", "Update".dimmed());
    }
  }

  // Confirmation gate (skipped with `--yes`).
  crate::command::wait_for_enter("update resource", *yes)?;

  config.apply(resource).await
}
|
||||
|
||||
/// Glue between a resource's partial config type and the write API
/// call that applies it.
pub trait ResourceUpdate {
  // Human-readable resource type name for display (eg "Build").
  fn resource_type() -> &'static str;
  // Apply this partial config to the resource with the given
  // name or id.
  async fn apply(self, resource: &str) -> anyhow::Result<()>;
}
|
||||
|
||||
/// Applies a partial Build config via the `UpdateBuild` write call.
impl ResourceUpdate for PartialBuildConfig {
  fn resource_type() -> &'static str {
    "Build"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateBuild {
        // `id` accepts the resource name or id string.
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update build config")?;
    Ok(())
  }
}
|
||||
|
||||
/// Applies a partial Deployment config via the `UpdateDeployment`
/// write call.
impl ResourceUpdate for PartialDeploymentConfig {
  fn resource_type() -> &'static str {
    "Deployment"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateDeployment {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update deployment config")?;
    Ok(())
  }
}
|
||||
|
||||
/// Applies a partial Repo config via the `UpdateRepo` write call.
impl ResourceUpdate for PartialRepoConfig {
  fn resource_type() -> &'static str {
    "Repo"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateRepo {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update repo config")?;
    Ok(())
  }
}
|
||||
|
||||
/// Applies a partial Server config via the `UpdateServer` write call.
impl ResourceUpdate for PartialServerConfig {
  fn resource_type() -> &'static str {
    "Server"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateServer {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update server config")?;
    Ok(())
  }
}
|
||||
|
||||
/// Applies a partial Stack config via the `UpdateStack` write call.
impl ResourceUpdate for PartialStackConfig {
  fn resource_type() -> &'static str {
    "Stack"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateStack {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update stack config")?;
    Ok(())
  }
}
|
||||
|
||||
/// Applies a partial ResourceSync config via the `UpdateResourceSync`
/// write call.
impl ResourceUpdate for PartialResourceSyncConfig {
  fn resource_type() -> &'static str {
    "Sync"
  }
  async fn apply(self, resource: &str) -> anyhow::Result<()> {
    let client = crate::command::komodo_client().await?;
    client
      .write(UpdateResourceSync {
        id: resource.to_string(),
        config: self,
      })
      .await
      .context("Failed to update sync config")?;
    Ok(())
  }
}
|
||||
@@ -1,122 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
cli::args::{CliEnabled, update::UpdateUserCommand},
|
||||
empty_or_redacted,
|
||||
},
|
||||
optional_string,
|
||||
};
|
||||
|
||||
use crate::{command::sanitize_uri, config::cli_config};
|
||||
|
||||
/// Dispatch a `km update user <username> ...` subcommand to the
/// matching handler.
pub async fn update(
  username: &str,
  command: &UpdateUserCommand,
) -> anyhow::Result<()> {
  match command {
    UpdateUserCommand::Password {
      password,
      unsanitized,
      yes,
    } => {
      update_password(username, password, *unsanitized, *yes).await
    }
    UpdateUserCommand::SuperAdmin { enabled, yes } => {
      update_super_admin(username, *enabled, *yes).await
    }
  }
}
|
||||
|
||||
/// Set a user's password by writing directly to the core database
/// (does not go through the Komodo API).
///
/// `unsanitized` echoes the raw password to the console instead of
/// a redacted placeholder; `yes` skips the confirmation prompt.
async fn update_password(
  username: &str,
  password: &str,
  unsanitized: bool,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Password\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  if unsanitized {
    println!(" - {}: {password}", "Password".dimmed());
  } else {
    // Redacted preview so the password doesn't land in scrollback.
    println!(
      " - {}: {}",
      "Password".dimmed(),
      empty_or_redacted(password)
    );
  }

  crate::command::wait_for_enter("update password", yes)?;

  info!("Updating password...");

  // Direct database connection using the CLI's configured db.
  let db = database::Client::new(&cli_config().database).await?;

  // Verify the user exists before attempting the password write.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;

  db.set_user_password(&user, password).await?;

  info!("Password updated ✅");

  Ok(())
}
|
||||
|
||||
/// Toggle a user's `super_admin` flag by writing directly to the core
/// database (does not go through the Komodo API).
///
/// Prints the target database connection details (with the uri
/// password redacted) before asking for confirmation.
/// NOTE(review): the labels say "Source URI" / "Source Address" etc —
/// presumably shared wording with the database copy command; confirm
/// intended.
async fn update_super_admin(
  username: &str,
  super_admin: CliEnabled,
  yes: bool,
) -> anyhow::Result<()> {
  let config = cli_config();

  println!("\n{}: Update Super Admin\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  println!(" - {}: {super_admin}\n", "Super Admin".dimmed());

  // Only show the connection fields that are actually configured.
  if let Some(uri) = optional_string(&config.database.uri) {
    // Redact the password portion of the uri before printing.
    println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Source Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Source Username".dimmed());
  }
  println!(
    "{}: {}",
    " - Source Db Name".dimmed(),
    config.database.db_name,
  );

  crate::command::wait_for_enter("update super admin", yes)?;

  info!("Updating super admin...");

  let db = database::Client::new(&config.database).await?;

  // Make sure the user exists first before saying it is successful.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;

  let super_admin: bool = super_admin.into();
  db.users
    .update_one(
      doc! { "username": user.username },
      doc! { "$set": { "super_admin": super_admin } },
    )
    .await
    .context("Failed to update user super admin on db")?;

  info!("Super admin updated ✅");

  Ok(())
}
|
||||
@@ -1,70 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::api::{
|
||||
read::GetVariable,
|
||||
write::{
|
||||
CreateVariable, UpdateVariableIsSecret, UpdateVariableValue,
|
||||
},
|
||||
};
|
||||
|
||||
/// Create or update a Komodo Variable through the API.
///
/// If a variable named `name` cannot be read, it is created with the
/// given value (and `is_secret` defaulting to false). Otherwise its
/// value is updated, and `is_secret` is additionally updated when
/// `secret` is provided and differs from the existing flag.
/// `yes` skips the confirmation prompt.
pub async fn update(
  name: &str,
  value: &str,
  secret: Option<bool>,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Variable\n", "Mode".dimmed());
  println!(" - {}: {name}", "Name".dimmed());
  println!(" - {}: {value}", "Value".dimmed());
  if let Some(secret) = secret {
    println!(" - {}: {secret}", "Is Secret".dimmed());
  }

  crate::command::wait_for_enter("update variable", yes)?;

  let client = crate::command::komodo_client().await?;

  // NOTE(review): ANY read error (not just "not found") falls into
  // the create branch — eg a transient network failure would attempt
  // a create. Confirm this is the intended behavior.
  let Ok(existing) = client
    .read(GetVariable {
      name: name.to_string(),
    })
    .await
  else {
    // Create the variable
    client
      .write(CreateVariable {
        name: name.to_string(),
        value: value.to_string(),
        is_secret: secret.unwrap_or_default(),
        description: Default::default(),
      })
      .await
      .context("Failed to create variable")?;
    info!("Variable created ✅");
    return Ok(());
  };

  client
    .write(UpdateVariableValue {
      name: name.to_string(),
      value: value.to_string(),
    })
    .await
    .context("Failed to update variable 'value'")?;
  info!("Variable 'value' updated ✅");

  // Only touch the is_secret flag when explicitly requested.
  let Some(secret) = secret else { return Ok(()) };

  if secret != existing.is_secret {
    client
      .write(UpdateVariableIsSecret {
        name: name.to_string(),
        is_secret: secret,
      })
      .await
      .context("Failed to update variable 'is_secret'")?;
    info!("Variable 'is_secret' updated to {secret} ✅");
  }

  Ok(())
}
|
||||
@@ -1,280 +0,0 @@
|
||||
use std::{path::PathBuf, sync::OnceLock};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::Parser;
|
||||
use colored::Colorize;
|
||||
use environment_file::maybe_read_item_from_file;
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
DatabaseConfig,
|
||||
cli::{
|
||||
CliConfig, Env,
|
||||
args::{CliArgs, Command, Execute, database::DatabaseCommand},
|
||||
},
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
|
||||
/// Parse the command line arguments once (via clap) and cache them
/// for the rest of the process lifetime.
pub fn cli_args() -> &'static CliArgs {
  static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
  CLI_ARGS.get_or_init(CliArgs::parse)
}
|
||||
|
||||
pub fn cli_env() -> &'static Env {
|
||||
static CLI_ARGS: OnceLock<Env> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(|| {
|
||||
match envy::from_env()
|
||||
.context("Failed to parse Komodo CLI environment")
|
||||
{
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:?}")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn cli_config() -> &'static CliConfig {
|
||||
static CLI_CONFIG: OnceLock<CliConfig> = OnceLock::new();
|
||||
CLI_CONFIG.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let env = cli_env().clone();
|
||||
let config_paths = args
|
||||
.config_path
|
||||
.clone()
|
||||
.unwrap_or(env.komodo_cli_config_paths);
|
||||
let debug_startup =
|
||||
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
|
||||
|
||||
if debug_startup {
|
||||
println!(
|
||||
"{}: Komodo CLI version: {}",
|
||||
"DEBUG".cyan(),
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
println!(
|
||||
"{}: {}: {config_paths:?}",
|
||||
"DEBUG".cyan(),
|
||||
"Config Paths".dimmed(),
|
||||
);
|
||||
}
|
||||
|
||||
let config_keywords = args
|
||||
.config_keyword
|
||||
.clone()
|
||||
.unwrap_or(env.komodo_cli_config_keywords);
|
||||
let config_keywords = config_keywords
|
||||
.iter()
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>();
|
||||
if debug_startup {
|
||||
println!(
|
||||
"{}: {}: {config_keywords:?}",
|
||||
"DEBUG".cyan(),
|
||||
"Config File Keywords".dimmed(),
|
||||
);
|
||||
}
|
||||
let mut unparsed_config = (config::ConfigLoader {
|
||||
paths: &config_paths
|
||||
.iter()
|
||||
.map(PathBuf::as_path)
|
||||
.collect::<Vec<_>>(),
|
||||
match_wildcards: &config_keywords,
|
||||
include_file_name: ".kminclude",
|
||||
merge_nested: env.komodo_cli_merge_nested_config,
|
||||
extend_array: env.komodo_cli_extend_config_arrays,
|
||||
debug_print: debug_startup,
|
||||
})
|
||||
.load::<serde_json::Map<String, serde_json::Value>>()
|
||||
.expect("failed at parsing config from paths");
|
||||
let init_parsed_config = serde_json::from_value::<CliConfig>(
|
||||
serde_json::Value::Object(unparsed_config.clone()),
|
||||
)
|
||||
.context("Failed to parse config")
|
||||
.unwrap();
|
||||
|
||||
let (host, key, secret) = match &args.command {
|
||||
Command::Execute(Execute {
|
||||
host, key, secret, ..
|
||||
}) => (host.clone(), key.clone(), secret.clone()),
|
||||
_ => (None, None, None),
|
||||
};
|
||||
|
||||
let backups_folder = match &args.command {
|
||||
Command::Database {
|
||||
command: DatabaseCommand::Backup { backups_folder, .. },
|
||||
} => backups_folder.clone(),
|
||||
Command::Database {
|
||||
command: DatabaseCommand::Restore { backups_folder, .. },
|
||||
} => backups_folder.clone(),
|
||||
_ => None,
|
||||
};
|
||||
let (uri, address, username, password, db_name) =
|
||||
match &args.command {
|
||||
Command::Database {
|
||||
command:
|
||||
DatabaseCommand::Copy {
|
||||
uri,
|
||||
address,
|
||||
username,
|
||||
password,
|
||||
db_name,
|
||||
..
|
||||
},
|
||||
} => (
|
||||
uri.clone(),
|
||||
address.clone(),
|
||||
username.clone(),
|
||||
password.clone(),
|
||||
db_name.clone(),
|
||||
),
|
||||
_ => (None, None, None, None, None),
|
||||
};
|
||||
|
||||
let profile = args
|
||||
.profile
|
||||
.as_ref()
|
||||
.or(init_parsed_config.default_profile.as_ref());
|
||||
|
||||
let unparsed_config = if let Some(profile) = profile
|
||||
&& !profile.is_empty()
|
||||
{
|
||||
// Find the profile config,
|
||||
// then merge it with the Default config.
|
||||
let serde_json::Value::Array(profiles) = unparsed_config
|
||||
.remove("profile")
|
||||
.context("Config has no profiles, but a profile is required")
|
||||
.unwrap()
|
||||
else {
|
||||
panic!("`config.profile` is not array");
|
||||
};
|
||||
let Some(profile_config) = profiles.into_iter().find(|p| {
|
||||
let Ok(parsed) =
|
||||
serde_json::from_value::<CliConfig>(p.clone())
|
||||
else {
|
||||
return false;
|
||||
};
|
||||
&parsed.config_profile == profile
|
||||
|| parsed
|
||||
.config_aliases
|
||||
.iter()
|
||||
.any(|alias| alias == profile)
|
||||
}) else {
|
||||
panic!("No profile matching '{profile}' was found.");
|
||||
};
|
||||
let serde_json::Value::Object(profile_config) = profile_config
|
||||
else {
|
||||
panic!("Profile config is not Object type.");
|
||||
};
|
||||
config::merge_config(
|
||||
unparsed_config,
|
||||
profile_config.clone(),
|
||||
env.komodo_cli_merge_nested_config,
|
||||
env.komodo_cli_extend_config_arrays,
|
||||
)
|
||||
.unwrap_or(profile_config)
|
||||
} else {
|
||||
unparsed_config
|
||||
};
|
||||
let config = serde_json::from_value::<CliConfig>(
|
||||
serde_json::Value::Object(unparsed_config),
|
||||
)
|
||||
.context("Failed to parse final config")
|
||||
.unwrap();
|
||||
let config_profile = if config.config_profile.is_empty() {
|
||||
String::from("None")
|
||||
} else {
|
||||
config.config_profile
|
||||
};
|
||||
|
||||
CliConfig {
|
||||
config_profile,
|
||||
config_aliases: config.config_aliases,
|
||||
default_profile: config.default_profile,
|
||||
table_borders: env
|
||||
.komodo_cli_table_borders
|
||||
.or(config.table_borders),
|
||||
host: host
|
||||
.or(env.komodo_cli_host)
|
||||
.or(env.komodo_host)
|
||||
.unwrap_or(config.host),
|
||||
cli_key: key.or(env.komodo_cli_key).or(config.cli_key),
|
||||
cli_secret: secret
|
||||
.or(env.komodo_cli_secret)
|
||||
.or(config.cli_secret),
|
||||
backups_folder: backups_folder
|
||||
.or(env.komodo_cli_backups_folder)
|
||||
.unwrap_or(config.backups_folder),
|
||||
max_backups: env
|
||||
.komodo_cli_max_backups
|
||||
.unwrap_or(config.max_backups),
|
||||
database_target: DatabaseConfig {
|
||||
uri: uri
|
||||
.or(env.komodo_cli_database_target_uri)
|
||||
.unwrap_or(config.database_target.uri),
|
||||
address: address
|
||||
.or(env.komodo_cli_database_target_address)
|
||||
.unwrap_or(config.database_target.address),
|
||||
username: username
|
||||
.or(env.komodo_cli_database_target_username)
|
||||
.unwrap_or(config.database_target.username),
|
||||
password: password
|
||||
.or(env.komodo_cli_database_target_password)
|
||||
.unwrap_or(config.database_target.password),
|
||||
db_name: db_name
|
||||
.or(env.komodo_cli_database_target_db_name)
|
||||
.unwrap_or(config.database_target.db_name),
|
||||
app_name: config.database_target.app_name,
|
||||
},
|
||||
database: DatabaseConfig {
|
||||
uri: maybe_read_item_from_file(
|
||||
env.komodo_database_uri_file,
|
||||
env.komodo_database_uri,
|
||||
)
|
||||
.unwrap_or(config.database.uri),
|
||||
address: env
|
||||
.komodo_database_address
|
||||
.unwrap_or(config.database.address),
|
||||
username: maybe_read_item_from_file(
|
||||
env.komodo_database_username_file,
|
||||
env.komodo_database_username,
|
||||
)
|
||||
.unwrap_or(config.database.username),
|
||||
password: maybe_read_item_from_file(
|
||||
env.komodo_database_password_file,
|
||||
env.komodo_database_password,
|
||||
)
|
||||
.unwrap_or(config.database.password),
|
||||
db_name: env
|
||||
.komodo_database_db_name
|
||||
.unwrap_or(config.database.db_name),
|
||||
app_name: config.database.app_name,
|
||||
},
|
||||
cli_logging: LogConfig {
|
||||
level: env
|
||||
.komodo_cli_logging_level
|
||||
.unwrap_or(config.cli_logging.level),
|
||||
stdio: env
|
||||
.komodo_cli_logging_stdio
|
||||
.unwrap_or(config.cli_logging.stdio),
|
||||
pretty: env
|
||||
.komodo_cli_logging_pretty
|
||||
.unwrap_or(config.cli_logging.pretty),
|
||||
location: false,
|
||||
ansi: env
|
||||
.komodo_cli_logging_ansi
|
||||
.unwrap_or(config.cli_logging.ansi),
|
||||
otlp_endpoint: env
|
||||
.komodo_cli_logging_otlp_endpoint
|
||||
.unwrap_or(config.cli_logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.komodo_cli_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_service_name),
|
||||
opentelemetry_scope_name: env
|
||||
.komodo_cli_logging_opentelemetry_scope_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_scope_name),
|
||||
},
|
||||
profile: config.profile,
|
||||
}
|
||||
})
|
||||
}
|
||||
485
bin/cli/src/exec.rs
Normal file
485
bin/cli/src/exec.rs
Normal file
@@ -0,0 +1,485 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::execute::{BatchExecutionResponse, Execution},
|
||||
entities::update::Update,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
helpers::wait_for_enter,
|
||||
state::{cli_args, komodo_client},
|
||||
};
|
||||
|
||||
pub enum ExecutionResult {
|
||||
Single(Update),
|
||||
Batch(BatchExecutionResponse),
|
||||
}
|
||||
|
||||
pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
if matches!(execution, Execution::None(_)) {
|
||||
println!("Got 'none' execution. Doing nothing...");
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
println!("Finished doing nothing. Exiting...");
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
println!("\n{}: Execution", "Mode".dimmed());
|
||||
match &execution {
|
||||
Execution::None(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Deploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchCloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchPullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchBuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelRepoBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteNetwork(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteImage(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteVolume(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneVolumes(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneDockerBuilders(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneBuildx(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneSystem(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CommitSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
}
|
||||
|
||||
if !cli_args().yes {
|
||||
wait_for_enter("run execution")?;
|
||||
}
|
||||
|
||||
info!("Running Execution...");
|
||||
|
||||
let res = match execution {
|
||||
Execution::RunAction(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchRunAction(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::RunProcedure(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchRunProcedure(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::RunBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchRunBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CancelBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::Deploy(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDeploy(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DestroyDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDestroyDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CloneRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchCloneRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchPullRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::BuildRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchBuildRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CancelRepoBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DestroyContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeleteNetwork(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneNetworks(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeleteImage(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneImages(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeleteVolume(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneVolumes(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneDockerBuilders(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneBuildx(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneSystem(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RunSync(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::CommitSync(request) => komodo_client()
|
||||
.write(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeployStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDeployStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::DeployStackIfChanged(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDeployStackIfChanged(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DestroyStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDestroyStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::TestAlerter(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
println!("Finished sleeping!");
|
||||
std::process::exit(0)
|
||||
}
|
||||
Execution::None(_) => unreachable!(),
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(ExecutionResult::Single(update)) => {
|
||||
println!("\n{}: {update:#?}", "SUCCESS".green())
|
||||
}
|
||||
Ok(ExecutionResult::Batch(update)) => {
|
||||
println!("\n{}: {update:#?}", "SUCCESS".green())
|
||||
}
|
||||
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
17
bin/cli/src/helpers.rs
Normal file
17
bin/cli/src/helpers.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
|
||||
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,96 +1,32 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::entities::config::cli::args;
|
||||
use komodo_client::api::read::GetVersion;
|
||||
|
||||
use crate::config::cli_config;
|
||||
|
||||
mod command;
|
||||
mod config;
|
||||
|
||||
async fn app() -> anyhow::Result<()> {
|
||||
dotenvy::dotenv().ok();
|
||||
logger::init(&config::cli_config().cli_logging)?;
|
||||
let args = config::cli_args();
|
||||
let env = config::cli_env();
|
||||
let debug_load =
|
||||
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
|
||||
|
||||
match &args.command {
|
||||
args::Command::Config {
|
||||
all_profiles,
|
||||
unsanitized,
|
||||
} => {
|
||||
let mut config = if *unsanitized {
|
||||
cli_config().clone()
|
||||
} else {
|
||||
cli_config().sanitized()
|
||||
};
|
||||
if !*all_profiles {
|
||||
config.profile = Default::default();
|
||||
}
|
||||
if debug_load {
|
||||
println!("\n{config:#?}");
|
||||
} else {
|
||||
println!(
|
||||
"\nCLI Config {}",
|
||||
serde_json::to_string_pretty(&config)
|
||||
.context("Failed to serialize config for pretty print")?
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
args::Command::Container(container) => {
|
||||
command::container::handle(container).await
|
||||
}
|
||||
args::Command::Inspect(inspect) => {
|
||||
command::container::inspect_container(inspect).await
|
||||
}
|
||||
args::Command::List(list) => command::list::handle(list).await,
|
||||
args::Command::Execute(args) => {
|
||||
command::execute::handle(&args.execution, args.yes).await
|
||||
}
|
||||
args::Command::Update { command } => {
|
||||
command::update::handle(command).await
|
||||
}
|
||||
args::Command::Connect(connect) => {
|
||||
command::terminal::handle_connect(connect).await
|
||||
}
|
||||
args::Command::Exec(exec) => {
|
||||
command::terminal::handle_exec(exec).await
|
||||
}
|
||||
args::Command::Attach(attach) => {
|
||||
command::terminal::handle_attach(attach).await
|
||||
}
|
||||
args::Command::Key { command } => {
|
||||
noise::key::command::handle(command).await
|
||||
}
|
||||
args::Command::Database { command } => {
|
||||
command::database::handle(command).await
|
||||
}
|
||||
}
|
||||
}
|
||||
mod args;
|
||||
mod exec;
|
||||
mod helpers;
|
||||
mod state;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let mut term_signal = tokio::signal::unix::signal(
|
||||
tokio::signal::unix::SignalKind::terminate(),
|
||||
)?;
|
||||
tokio::select! {
|
||||
res = tokio::spawn(app()) => match res {
|
||||
Ok(Err(e)) => {
|
||||
eprintln!("{}: {e}", "ERROR".red());
|
||||
std::process::exit(1)
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("{}: {e}", "ERROR".red());
|
||||
std::process::exit(1)
|
||||
},
|
||||
Ok(_) => {}
|
||||
},
|
||||
_ = term_signal.recv() => {},
|
||||
tracing_subscriber::fmt().with_target(false).init();
|
||||
|
||||
info!(
|
||||
"Komodo CLI version: {}",
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
|
||||
let version =
|
||||
state::komodo_client().read(GetVersion {}).await?.version;
|
||||
info!("Komodo Core version: {}", version.blue().bold());
|
||||
|
||||
match &state::cli_args().command {
|
||||
args::Command::Execute { execution } => {
|
||||
exec::run(execution.to_owned()).await?
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
48
bin/cli/src/state.rs
Normal file
48
bin/cli/src/state.rs
Normal file
@@ -0,0 +1,48 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use clap::Parser;
|
||||
use komodo_client::KomodoClient;
|
||||
use merge_config_files::parse_config_file;
|
||||
|
||||
pub fn cli_args() -> &'static crate::args::CliArgs {
|
||||
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn komodo_client() -> &'static KomodoClient {
|
||||
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
|
||||
KOMODO_CLIENT.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let crate::args::CredsFile { url, key, secret } =
|
||||
match (&args.url, &args.key, &args.secret) {
|
||||
(Some(url), Some(key), Some(secret)) => {
|
||||
crate::args::CredsFile {
|
||||
url: url.clone(),
|
||||
key: key.clone(),
|
||||
secret: secret.clone(),
|
||||
}
|
||||
}
|
||||
(url, key, secret) => {
|
||||
let mut creds: crate::args::CredsFile =
|
||||
parse_config_file(cli_args().creds.as_str())
|
||||
.expect("failed to parse Komodo credentials");
|
||||
|
||||
if let Some(url) = url {
|
||||
creds.url.clone_from(url);
|
||||
}
|
||||
if let Some(key) = key {
|
||||
creds.key.clone_from(key);
|
||||
}
|
||||
if let Some(secret) = secret {
|
||||
creds.secret.clone_from(secret);
|
||||
}
|
||||
|
||||
creds
|
||||
}
|
||||
};
|
||||
futures::executor::block_on(
|
||||
KomodoClient::new(url, key, secret).with_healthcheck(),
|
||||
)
|
||||
.expect("failed to initialize Komodo client")
|
||||
})
|
||||
}
|
||||
@@ -18,35 +18,28 @@ path = "src/main.rs"
|
||||
komodo_client = { workspace = true, features = ["mongo"] }
|
||||
periphery_client.workspace = true
|
||||
environment_file.workspace = true
|
||||
interpolate.workspace = true
|
||||
secret_file.workspace = true
|
||||
formatting.workspace = true
|
||||
transport.workspace = true
|
||||
database.workspace = true
|
||||
encoding.workspace = true
|
||||
response.workspace = true
|
||||
command.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
cache.workspace = true
|
||||
noise.workspace = true
|
||||
git.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
merge_config_files.workspace = true
|
||||
async_timing_util.workspace = true
|
||||
partial_derive2.workspace = true
|
||||
derive_variants.workspace = true
|
||||
mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
toml_pretty.workspace = true
|
||||
mungos.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
aws-credential-types.workspace = true
|
||||
english-to-cron.workspace = true
|
||||
openidconnect.workspace = true
|
||||
jsonwebtoken.workspace = true
|
||||
futures-util.workspace = true
|
||||
axum-server.workspace = true
|
||||
ordered_hash_map.workspace = true
|
||||
openidconnect.workspace = true
|
||||
urlencoding.workspace = true
|
||||
aws-sdk-ec2.workspace = true
|
||||
aws-config.workspace = true
|
||||
@@ -54,34 +47,29 @@ tokio-util.workspace = true
|
||||
axum-extra.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml_ng.workspace = true
|
||||
serde_qs.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
typeshare.workspace = true
|
||||
chrono-tz.workspace = true
|
||||
indexmap.workspace = true
|
||||
octorust.workspace = true
|
||||
wildcard.workspace = true
|
||||
arc-swap.workspace = true
|
||||
colored.workspace = true
|
||||
dashmap.workspace = true
|
||||
tracing.workspace = true
|
||||
reqwest.workspace = true
|
||||
futures.workspace = true
|
||||
nom_pem.workspace = true
|
||||
dotenvy.workspace = true
|
||||
anyhow.workspace = true
|
||||
croner.workspace = true
|
||||
chrono.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
rustls.workspace = true
|
||||
bytes.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
regex.workspace = true
|
||||
axum.workspace = true
|
||||
toml.workspace = true
|
||||
uuid.workspace = true
|
||||
envy.workspace = true
|
||||
rand.workspace = true
|
||||
hmac.workspace = true
|
||||
sha2.workspace = true
|
||||
jwt.workspace = true
|
||||
hex.workspace = true
|
||||
url.workspace = true
|
||||
@@ -1,8 +1,7 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.90.0-trixie AS core-builder
|
||||
RUN cargo install cargo-strip
|
||||
FROM rust:1.84.1-bullseye AS core-builder
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -10,12 +9,9 @@ COPY ./lib ./lib
|
||||
COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/core ./bin/core
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile app
|
||||
RUN cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
RUN cargo build -p komodo_core --release
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
@@ -26,20 +22,20 @@ RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
FROM debian:trixie-slim
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Setup an application directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
COPY --from=core-builder /builder/target/release/core /usr/local/bin/core
|
||||
COPY --from=core-builder /builder/target/release/km /usr/local/bin/km
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
# Set $DENO_DIR and preload external Deno deps
|
||||
@@ -48,21 +44,12 @@ RUN mkdir /action-cache && \
|
||||
cd /action-cache && \
|
||||
deno install jsr:@std/yaml jsr:@std/toml
|
||||
|
||||
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
ENTRYPOINT [ "core" ]
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
## Core deps installer
|
||||
|
||||
apt-get update
|
||||
apt-get install -y git curl ca-certificates iproute2
|
||||
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Starship prompt
|
||||
curl -sS https://starship.rs/install.sh | sh -s -- --yes --bin-dir /usr/local/bin
|
||||
echo 'export STARSHIP_CONFIG=/starship.toml' >> /root/.bashrc
|
||||
echo 'eval "$(starship init bash)"' >> /root/.bashrc
|
||||
|
||||
@@ -13,28 +13,23 @@ FROM ${AARCH64_BINARIES} AS aarch64
|
||||
FROM ${FRONTEND_IMAGE} AS frontend
|
||||
|
||||
# Final Image
|
||||
FROM debian:trixie-slim
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /core /app/core/linux/amd64
|
||||
COPY --from=aarch64 /core /app/core/linux/arm64
|
||||
RUN mv /app/core/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/core
|
||||
|
||||
# Same for km
|
||||
COPY --from=x86_64 /km /app/km/linux/amd64
|
||||
COPY --from=aarch64 /km /app/km/linux/arm64
|
||||
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
|
||||
COPY --from=x86_64 /core /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /core /app/arch/linux/arm64
|
||||
ARG TARGETPLATFORM
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/arch
|
||||
|
||||
# Copy default config / static frontend / deno binary
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY --from=frontend /frontend /app/frontend
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
@@ -44,22 +39,12 @@ RUN mkdir /action-cache && \
|
||||
cd /action-cache && \
|
||||
deno install jsr:@std/yaml jsr:@std/toml
|
||||
|
||||
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
ENTRYPOINT [ "entrypoint.sh" ]
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD [ "core" ]
|
||||
@@ -14,17 +14,17 @@ COPY ./client/core/ts ./client
|
||||
RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
|
||||
FROM debian:trixie-slim
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
COPY --from=binaries /core /usr/local/bin/core
|
||||
COPY --from=binaries /km /usr/local/bin/km
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
# Set $DENO_DIR and preload external Deno deps
|
||||
@@ -33,22 +33,12 @@ RUN mkdir /action-cache && \
|
||||
cd /action-cache && \
|
||||
deno install jsr:@std/yaml jsr:@std/toml
|
||||
|
||||
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
ENTRYPOINT [ "entrypoint.sh" ]
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD [ "core" ]
|
||||
@@ -4,6 +4,7 @@ use serde::Serialize;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -16,28 +17,6 @@ pub async fn send_alert(
|
||||
"{level} | If you see this message, then Alerter **{name}** is **working**\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Version mismatch detected ⚠️\nPeriphery: **{server_version}** | Core: **{core_version}**\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -49,7 +28,7 @@ pub async fn send_alert(
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} is now **connected**\n{link}"
|
||||
"{level} | **{name}**{region} is now **reachable**\n{link}"
|
||||
)
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
@@ -115,9 +94,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
|
||||
)
|
||||
format!("📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
@@ -127,9 +104,7 @@ pub async fn send_alert(
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}"
|
||||
)
|
||||
format!("⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}")
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
@@ -139,9 +114,7 @@ pub async fn send_alert(
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}"
|
||||
)
|
||||
format!("⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}")
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
@@ -153,9 +126,7 @@ pub async fn send_alert(
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
|
||||
)
|
||||
format!("🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
@@ -166,9 +137,7 @@ pub async fn send_alert(
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}"
|
||||
)
|
||||
format!("⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}")
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
@@ -181,17 +150,13 @@ pub async fn send_alert(
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}"
|
||||
)
|
||||
format!("⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}")
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}"
|
||||
)
|
||||
format!("{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}")
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
@@ -202,71 +167,18 @@ pub async fn send_alert(
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build **{name}** failed\nversion: **v{version}**\n{link}"
|
||||
)
|
||||
format!("{level} | Build **{name}** failed\nversion: **v{version}**\n{link}")
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for **{name}** failed\n{link}")
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure **{name}** failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action **{name}** failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | **{name}** ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
if !content.is_empty() {
|
||||
send_message(url, &content).await?;
|
||||
}
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, &content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
|
||||
@@ -1,55 +1,59 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use std::collections::HashSet;
|
||||
use ::slack::types::Block;
|
||||
use anyhow::{anyhow, Context};
|
||||
use derive_variants::ExtractVariant;
|
||||
use futures_util::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
use futures::future::join_all;
|
||||
use komodo_client::entities::{
|
||||
ResourceTargetVariant,
|
||||
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
|
||||
alerter::*,
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp,
|
||||
stack::StackState,
|
||||
ResourceTargetVariant,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::helpers::query::get_variables_and_secrets;
|
||||
use crate::helpers::{
|
||||
maintenance::is_in_maintenance, query::VariablesAndSecrets,
|
||||
};
|
||||
use crate::{config::core_config, state::db_client};
|
||||
use crate::helpers::interpolate::interpolate_variables_secrets_into_string;
|
||||
use crate::helpers::query::get_variables_and_secrets;
|
||||
|
||||
mod discord;
|
||||
mod ntfy;
|
||||
mod pushover;
|
||||
mod slack;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alerts(alerts: &[Alert]) {
|
||||
if alerts.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
let span =
|
||||
info_span!("send_alerts", alerts = format!("{alerts:?}"));
|
||||
async {
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"ERROR sending alerts | failed to get alerters from db | {e:#}"
|
||||
)
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles = alerts
|
||||
.iter()
|
||||
.map(|alert| send_alert_to_alerters(&alerters, alert));
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
join_all(handles).await;
|
||||
}
|
||||
.instrument(span)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -74,13 +78,6 @@ pub async fn send_alert_to_alerter(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if is_in_maintenance(
|
||||
&alerter.config.maintenance_windows,
|
||||
komodo_timestamp(),
|
||||
) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let alert_type = alert.data.extract_variant();
|
||||
|
||||
// In the test case, we don't want the filters inside this
|
||||
@@ -131,39 +128,27 @@ pub async fn send_alert_to_alerter(
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Ntfy(NtfyAlerterEndpoint { url, email }) => {
|
||||
ntfy::send_alert(url, email.as_deref(), alert)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to send alert to ntfy Alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Pushover(PushoverAlerterEndpoint { url }) => {
|
||||
pushover::send_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send alert to Pushover Alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_custom_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
// interpolate variables and secrets into the url
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut url_interpolated,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
let res = reqwest::Client::new()
|
||||
.post(url_interpolated)
|
||||
@@ -171,15 +156,9 @@ async fn send_custom_alert(
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with request: {sanitized_error}"
|
||||
))
|
||||
let replacers = secret_replacers.into_iter().collect::<Vec<_>>();
|
||||
let sanitized_error = svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!("Error with request: {}", sanitized_error))
|
||||
})
|
||||
.context("failed at post request to alerter")?;
|
||||
let status = res.status();
|
||||
@@ -234,244 +213,38 @@ fn resource_link(
|
||||
resource_type: ResourceTargetVariant,
|
||||
id: &str,
|
||||
) -> String {
|
||||
komodo_client::entities::resource_link(
|
||||
&core_config().host,
|
||||
resource_type,
|
||||
id,
|
||||
)
|
||||
}
|
||||
let path = match resource_type {
|
||||
ResourceTargetVariant::System => unreachable!(),
|
||||
ResourceTargetVariant::Build => format!("/builds/{id}"),
|
||||
ResourceTargetVariant::Builder => {
|
||||
format!("/builders/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Deployment => {
|
||||
format!("/deployments/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Stack => {
|
||||
format!("/stacks/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Server => {
|
||||
format!("/servers/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Repo => format!("/repos/{id}"),
|
||||
ResourceTargetVariant::Alerter => {
|
||||
format!("/alerters/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Procedure => {
|
||||
format!("/procedures/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Action => {
|
||||
format!("/actions/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ServerTemplate => {
|
||||
format!("/server-templates/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
format!("/resource-syncs/{id}")
|
||||
}
|
||||
};
|
||||
|
||||
/// Standard message content format
|
||||
/// used by Ntfy, Pushover.
|
||||
fn standard_alert_content(alert: &Alert) -> String {
|
||||
let level = fmt_level(alert.level);
|
||||
match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now connected\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
}
|
||||
format!("{}{path}", core_config().host)
|
||||
}
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
email: Option<&str>,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let content = standard_alert_content(alert);
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, email, content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
url: &str,
|
||||
email: Option<&str>,
|
||||
content: String,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut request = http_client()
|
||||
.post(url)
|
||||
.header("Title", "Komodo Alert")
|
||||
.body(content);
|
||||
|
||||
if let Some(email) = email {
|
||||
request = request.header("X-Email", email);
|
||||
}
|
||||
|
||||
let response =
|
||||
request.send().await.context("Failed to send message")?;
|
||||
|
||||
let status = response.status();
|
||||
if status.is_success() {
|
||||
debug!("ntfy alert sent successfully: {}", status);
|
||||
Ok(())
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send message to ntfy | {status} | failed to get response text"
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
"Failed to send message to ntfy | {status} | {text}",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn http_client() -> &'static reqwest::Client {
|
||||
static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
|
||||
CLIENT.get_or_init(reqwest::Client::new)
|
||||
}
|
||||
@@ -1,74 +0,0 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let content = standard_alert_content(alert);
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, content).await.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
url: &str,
|
||||
content: String,
|
||||
) -> anyhow::Result<()> {
|
||||
// pushover needs all information to be encoded in the URL. At minimum they need
|
||||
// the user key, the application token, and the message (url encoded).
|
||||
// other optional params here: https://pushover.net/api (just add them to the
|
||||
// webhook url along with the application token and the user key).
|
||||
let content = [("message", content)];
|
||||
|
||||
let response = http_client()
|
||||
.post(url)
|
||||
.form(&content)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send message")?;
|
||||
|
||||
let status = response.status();
|
||||
if status.is_success() {
|
||||
debug!("pushover alert sent successfully: {}", status);
|
||||
Ok(())
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send message to pushover | {status} | failed to get response text"
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
"Failed to send message to pushover | {} | {}",
|
||||
status,
|
||||
text
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn http_client() -> &'static reqwest::Client {
|
||||
static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
|
||||
CLIENT.get_or_init(reqwest::Client::new)
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
use ::slack::types::OwnedBlock as Block;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -24,35 +23,6 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let text = match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Periphery version now matches Core version ✅"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}"
|
||||
)
|
||||
}
|
||||
};
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Server,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -63,11 +33,11 @@ pub async fn send_alert(
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text =
|
||||
format!("{level} | *{name}*{region} is now *connected*");
|
||||
format!("{level} | *{name}*{region} is now *reachable*");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} is now *connnected*"
|
||||
"*{name}*{region} is now *reachable*"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
@@ -103,9 +73,7 @@ pub async fn send_alert(
|
||||
let region = fmt_region(region);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text = format!(
|
||||
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%*"
|
||||
);
|
||||
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
@@ -119,9 +87,7 @@ pub async fn send_alert(
|
||||
(text, blocks.into())
|
||||
}
|
||||
_ => {
|
||||
let text = format!(
|
||||
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈"
|
||||
);
|
||||
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
@@ -147,9 +113,7 @@ pub async fn send_alert(
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text = format!(
|
||||
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾"
|
||||
);
|
||||
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
@@ -166,9 +130,7 @@ pub async fn send_alert(
|
||||
(text, blocks.into())
|
||||
}
|
||||
_ => {
|
||||
let text = format!(
|
||||
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾"
|
||||
);
|
||||
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
@@ -198,9 +160,7 @@ pub async fn send_alert(
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text = format!(
|
||||
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿"
|
||||
);
|
||||
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
@@ -209,17 +169,12 @@ pub async fn send_alert(
|
||||
Block::section(format!(
|
||||
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
|
||||
)),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Server,
|
||||
id,
|
||||
)),
|
||||
Block::section(resource_link(ResourceTargetVariant::Server, id)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
_ => {
|
||||
let text = format!(
|
||||
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿"
|
||||
);
|
||||
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
@@ -228,10 +183,7 @@ pub async fn send_alert(
|
||||
Block::section(format!(
|
||||
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
|
||||
)),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Server,
|
||||
id,
|
||||
)),
|
||||
Block::section(resource_link(ResourceTargetVariant::Server, id)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
@@ -403,7 +355,9 @@ pub async fn send_alert(
|
||||
let text = format!("{level} | Build {name} has failed");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!("version: *v{version}*",)),
|
||||
Block::section(format!(
|
||||
"build name: *{name}*\nversion: *v{version}*",
|
||||
)),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Build,
|
||||
id,
|
||||
@@ -416,6 +370,7 @@ pub async fn send_alert(
|
||||
format!("{level} | Repo build for *{name}* has *failed*");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!("repo name: *{name}*",)),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Repo,
|
||||
id,
|
||||
@@ -423,76 +378,11 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let text = format!("{level} | Procedure *{name}* has *failed*");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Procedure,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let text = format!("{level} | Action *{name}* has *failed*");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Action,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let text = format!(
|
||||
"{level} | *{name}* ({resource_type}) | Scheduled run started 🕝"
|
||||
);
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(*resource_type, id)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
let text = format!("{level} | {message}");
|
||||
let blocks =
|
||||
vec![Block::header(text.clone()), Block::section(details)];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if text.is_empty() {
|
||||
return Ok(());
|
||||
if !text.is_empty() {
|
||||
let slack = ::slack::Client::new(url);
|
||||
slack.send_message(text, blocks).await?;
|
||||
}
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
let slack = ::slack::Client::new(url_interpolated);
|
||||
slack
|
||||
.send_owned_message_single(&text, None, blocks.as_deref())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
use std::{sync::OnceLock, time::Instant};
|
||||
|
||||
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
|
||||
use axum::{http::HeaderMap, routing::post, Router};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::auth::*, entities::user::User};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::{AddStatusCode, Json};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -17,16 +15,13 @@ use crate::{
|
||||
get_user_id_from_headers,
|
||||
github::{self, client::github_oauth_client},
|
||||
google::{self, client::google_oauth_client},
|
||||
oidc::{self, client::oidc_client},
|
||||
oidc,
|
||||
},
|
||||
config::core_config,
|
||||
helpers::query::get_user,
|
||||
state::jwt_client,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AuthArgs {
|
||||
pub headers: HeaderMap,
|
||||
}
|
||||
@@ -43,16 +38,14 @@ pub struct AuthArgs {
|
||||
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
|
||||
pub enum AuthRequest {
|
||||
GetLoginOptions(GetLoginOptions),
|
||||
SignUpLocalUser(SignUpLocalUser),
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
LoginLocalUser(LoginLocalUser),
|
||||
ExchangeForJwt(ExchangeForJwt),
|
||||
GetUser(GetUser),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
let mut router = Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler));
|
||||
let mut router = Router::new().route("/", post(handler));
|
||||
|
||||
if core_config().local_auth {
|
||||
info!("🔑 Local Login Enabled");
|
||||
@@ -64,7 +57,7 @@ pub fn router() -> Router {
|
||||
}
|
||||
|
||||
if google_oauth_client().is_some() {
|
||||
info!("🔑 Google Login Enabled");
|
||||
info!("🔑 Github Login Enabled");
|
||||
router = router.nest("/google", google::router())
|
||||
}
|
||||
|
||||
@@ -76,18 +69,7 @@ pub fn router() -> Router {
|
||||
router
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
headers: HeaderMap,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: AuthRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(headers, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
|
||||
async fn handler(
|
||||
headers: HeaderMap,
|
||||
Json(request): Json<AuthRequest>,
|
||||
@@ -115,15 +97,23 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
let config = core_config();
|
||||
GetLoginOptionsResponse {
|
||||
local: config.local_auth,
|
||||
github: github_oauth_client().is_some(),
|
||||
google: google_oauth_client().is_some(),
|
||||
oidc: oidc_client().load().is_some(),
|
||||
github: config.github_oauth.enabled
|
||||
&& !config.github_oauth.id.is_empty()
|
||||
&& !config.github_oauth.secret.is_empty(),
|
||||
google: config.google_oauth.enabled
|
||||
&& !config.google_oauth.id.is_empty()
|
||||
&& !config.google_oauth.secret.is_empty(),
|
||||
oidc: config.oidc_enabled
|
||||
&& !config.oidc_provider.is_empty()
|
||||
&& !config.oidc_client_id.is_empty()
|
||||
&& !config.oidc_client_secret.is_empty(),
|
||||
registration_disabled: config.disable_user_registration,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for GetLoginOptions {
|
||||
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
@@ -133,27 +123,23 @@ impl Resolve<AuthArgs> for GetLoginOptions {
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for ExchangeForJwt {
|
||||
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
) -> serror::Result<ExchangeForJwtResponse> {
|
||||
jwt_client()
|
||||
.redeem_exchange_token(&self.token)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
let jwt = jwt_client().redeem_exchange_token(&self.token).await?;
|
||||
Ok(ExchangeForJwtResponse { jwt })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for GetUser {
|
||||
#[instrument(name = "GetUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
AuthArgs { headers }: &AuthArgs,
|
||||
) -> serror::Result<User> {
|
||||
let user_id = get_user_id_from_headers(headers)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
get_user(&user_id)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)
|
||||
let user_id = get_user_id_from_headers(headers).await?;
|
||||
Ok(get_user(&user_id).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,47 +1,39 @@
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
sync::OnceLock,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use command::run_komodo_standard_command;
|
||||
use config::merge_objects;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use interpolate::Interpolator;
|
||||
use command::run_komodo_command;
|
||||
use komodo_client::{
|
||||
api::{
|
||||
execute::{BatchExecutionResponse, BatchRunAction, RunAction},
|
||||
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
|
||||
},
|
||||
entities::{
|
||||
FileFormat, JsonObject,
|
||||
action::Action,
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
random_string,
|
||||
update::Update,
|
||||
user::action_user,
|
||||
action::Action, config::core::CoreConfig,
|
||||
permission::PermissionLevel, update::Update, user::action_user,
|
||||
},
|
||||
parsers::parse_key_value_list,
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
api::{execute::ExecuteRequest, user::UserArgs},
|
||||
config::core_config,
|
||||
helpers::{
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_string,
|
||||
},
|
||||
query::get_variables_and_secrets,
|
||||
random_string,
|
||||
update::update_update,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource::refresh_action_state_cache,
|
||||
resource::{self, refresh_action_state_cache},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
@@ -50,26 +42,15 @@ use super::ExecuteArgs;
|
||||
impl super::BatchExecute for BatchRunAction {
|
||||
type Resource = Action;
|
||||
fn single_request(action: String) -> ExecuteRequest {
|
||||
ExecuteRequest::RunAction(RunAction {
|
||||
action,
|
||||
args: Default::default(),
|
||||
})
|
||||
ExecuteRequest::RunAction(RunAction { action })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunAction {
|
||||
#[instrument(
|
||||
"BatchRunAction",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchRunAction>(&self.pattern, user)
|
||||
@@ -79,24 +60,15 @@ impl Resolve<ExecuteArgs> for BatchRunAction {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunAction {
|
||||
#[instrument(
|
||||
"RunAction",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
action = self.action,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RunAction", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut action = get_check_permissions::<Action>(
|
||||
let mut action = resource::get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -108,33 +80,13 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure action not already busy before updating.
|
||||
let _action_guard = action_state.update_custom(
|
||||
|state| state.running += 1,
|
||||
|state| state.running -= 1,
|
||||
false,
|
||||
)?;
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.running = true)?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let default_args = parse_action_arguments(
|
||||
&action.config.arguments,
|
||||
action.config.arguments_format,
|
||||
)
|
||||
.context("Failed to parse default Action arguments")?;
|
||||
|
||||
let args = merge_objects(
|
||||
default_args,
|
||||
self.args.unwrap_or_default(),
|
||||
true,
|
||||
true,
|
||||
)
|
||||
.context("Failed to merge request args with default args")?;
|
||||
|
||||
let args = serde_json::to_string(&args)
|
||||
.context("Failed to serialize action run arguments")?;
|
||||
|
||||
let CreateApiKeyResponse { key, secret } = CreateApiKey {
|
||||
name: update.id.clone(),
|
||||
expires: 0,
|
||||
@@ -147,7 +99,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let contents = &mut action.config.file_contents;
|
||||
|
||||
// Wrap the file contents in the execution context.
|
||||
*contents = full_contents(contents, &args, &key, &secret);
|
||||
*contents = full_contents(contents, &key, &secret);
|
||||
|
||||
let replacers =
|
||||
interpolate(contents, &mut update, key.clone(), secret.clone())
|
||||
@@ -158,34 +110,20 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let file = format!("{}.ts", random_string(10));
|
||||
let path = core_config().action_directory.join(&file);
|
||||
|
||||
secret_file::write_async(&path, contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write action file to {path:?}")
|
||||
})?;
|
||||
if let Some(parent) = path.parent() {
|
||||
let _ = fs::create_dir_all(parent).await;
|
||||
}
|
||||
|
||||
let CoreConfig { ssl_enabled, .. } = core_config();
|
||||
fs::write(&path, contents).await.with_context(|| {
|
||||
format!("Failed to write action file to {path:?}")
|
||||
})?;
|
||||
|
||||
let https_cert_flag = if *ssl_enabled {
|
||||
" --unsafely-ignore-certificate-errors=localhost"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
|
||||
let reload = if action.config.reload_deno_deps {
|
||||
" --reload"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
|
||||
let mut res = run_komodo_standard_command(
|
||||
let mut res = run_komodo_command(
|
||||
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
|
||||
"Execute Action",
|
||||
None,
|
||||
format!(
|
||||
"deno run --allow-all{https_cert_flag}{reload} {}",
|
||||
path.display()
|
||||
),
|
||||
format!("deno run --allow-all {}", path.display()),
|
||||
false,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -219,7 +157,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -228,68 +166,48 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success && action.config.failure_alert {
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
target,
|
||||
ts: komodo_timestamp(),
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::ActionFailed {
|
||||
id: action.id,
|
||||
name: action.name,
|
||||
},
|
||||
};
|
||||
send_alerts(&[alert]).await
|
||||
});
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("Interpolate", skip(contents, update, secret))]
|
||||
async fn interpolate(
|
||||
contents: &mut String,
|
||||
update: &mut Update,
|
||||
key: String,
|
||||
secret: String,
|
||||
) -> serror::Result<HashSet<(String, String)>> {
|
||||
let VariablesAndSecrets {
|
||||
variables,
|
||||
mut secrets,
|
||||
} = get_variables_and_secrets().await?;
|
||||
let mut vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
secrets.insert(String::from("ACTION_API_KEY"), key);
|
||||
secrets.insert(String::from("ACTION_API_SECRET"), secret);
|
||||
vars_and_secrets
|
||||
.secrets
|
||||
.insert(String::from("ACTION_API_KEY"), key);
|
||||
vars_and_secrets
|
||||
.secrets
|
||||
.insert(String::from("ACTION_API_SECRET"), secret);
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolator
|
||||
.interpolate_string(contents)?
|
||||
.push_logs(&mut update.logs);
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
contents,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
Ok(interpolator.secret_replacers)
|
||||
add_interp_update_log(update, &global_replacers, &secret_replacers);
|
||||
|
||||
Ok(secret_replacers)
|
||||
}
|
||||
|
||||
fn full_contents(
|
||||
contents: &str,
|
||||
// Pre-serialized to JSON string.
|
||||
args: &str,
|
||||
key: &str,
|
||||
secret: &str,
|
||||
) -> String {
|
||||
fn full_contents(contents: &str, key: &str, secret: &str) -> String {
|
||||
let CoreConfig {
|
||||
port, ssl_enabled, ..
|
||||
} = core_config();
|
||||
let protocol = if *ssl_enabled { "https" } else { "http" };
|
||||
let base_url = format!("{protocol}://localhost:{port}");
|
||||
format!(
|
||||
"import {{ KomodoClient, Types }} from '{base_url}/client/lib.js';
|
||||
"import {{ KomodoClient }} from '{base_url}/client/lib.js';
|
||||
import * as __YAML__ from 'jsr:@std/yaml';
|
||||
import * as __TOML__ from 'jsr:@std/toml';
|
||||
|
||||
@@ -307,8 +225,6 @@ const TOML = {{
|
||||
parseCargoToml: __TOML__.parse,
|
||||
}}
|
||||
|
||||
const ARGS = {args};
|
||||
|
||||
const komodo = KomodoClient('{base_url}', {{
|
||||
type: 'api-key',
|
||||
params: {{ key: '{key}', secret: '{secret}' }}
|
||||
@@ -327,7 +243,7 @@ main()
|
||||
console.error('Status:', error.status);
|
||||
console.error(JSON.stringify(error.result, null, 2));
|
||||
}} else {{
|
||||
console.error(error);
|
||||
console.error(JSON.stringify(error, null, 2));
|
||||
}}
|
||||
Deno.exit(1)
|
||||
}});"
|
||||
@@ -337,7 +253,6 @@ main()
|
||||
/// Cleans up file at given path.
|
||||
/// ALSO if $DENO_DIR is set,
|
||||
/// will clean up the generated file matching "file"
|
||||
#[instrument("CleanupRun")]
|
||||
async fn cleanup_run(file: String, path: &Path) {
|
||||
if let Err(e) = fs::remove_file(path).await {
|
||||
warn!(
|
||||
@@ -357,7 +272,7 @@ fn deno_dir() -> Option<&'static Path> {
|
||||
DENO_DIR
|
||||
.get_or_init(|| {
|
||||
let deno_dir = std::env::var("DENO_DIR").ok()?;
|
||||
Some(PathBuf::from(&deno_dir))
|
||||
PathBuf::from_str(&deno_dir).ok()
|
||||
})
|
||||
.as_deref()
|
||||
}
|
||||
@@ -390,8 +305,8 @@ fn delete_file(
|
||||
if name == file {
|
||||
if let Err(e) = fs::remove_file(entry.path()).await {
|
||||
warn!(
|
||||
"Failed to clean up generated file after action execution | {e:#}"
|
||||
);
|
||||
"Failed to clean up generated file after action execution | {e:#}"
|
||||
);
|
||||
};
|
||||
return true;
|
||||
}
|
||||
@@ -415,25 +330,3 @@ fn delete_file(
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_action_arguments(
|
||||
args: &str,
|
||||
format: FileFormat,
|
||||
) -> anyhow::Result<JsonObject> {
|
||||
match format {
|
||||
FileFormat::KeyValue => {
|
||||
let args = parse_key_value_list(args)
|
||||
.context("Failed to parse args as key value list")?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, serde_json::Value::String(v)))
|
||||
.collect();
|
||||
Ok(args)
|
||||
}
|
||||
FileFormat::Toml => toml::from_str(args)
|
||||
.context("Failed to parse Toml to Action args"),
|
||||
FileFormat::Yaml => serde_yaml_ng::from_str(args)
|
||||
.context("Failed to parse Yaml to action args"),
|
||||
FileFormat::Json => serde_json::from_str(args)
|
||||
.context("Failed to parse Json to action args"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,47 +1,32 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use futures_util::{
|
||||
StreamExt, TryStreamExt, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::execute::{SendAlert, TestAlerter},
|
||||
api::execute::TestAlerter,
|
||||
entities::{
|
||||
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alerter::Alerter,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
alert::send_alert_to_alerter, helpers::update::update_update,
|
||||
permission::get_check_permissions, resource::list_full_for_user,
|
||||
resource::get_check_permissions,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
#[instrument(
|
||||
"TestAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
alerter = self.alerter,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "TestAlerter", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerter = get_check_permissions::<Alerter>(
|
||||
&self.alerter,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -86,106 +71,3 @@ impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ExecuteArgs> for SendAlert {
|
||||
#[instrument(
|
||||
"SendAlert",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
request = format!("{self:?}"),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerters = list_full_for_user::<Alerter>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|a| {
|
||||
a.config.enabled
|
||||
&& (self.alerters.is_empty()
|
||||
|| self.alerters.contains(&a.name)
|
||||
|| self.alerters.contains(&a.id))
|
||||
&& (a.config.alert_types.is_empty()
|
||||
|| a.config.alert_types.contains(&AlertDataVariant::Custom))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let alerters = if user.admin {
|
||||
alerters
|
||||
} else {
|
||||
// Only keep alerters with execute permissions
|
||||
alerters
|
||||
.into_iter()
|
||||
.map(|alerter| async move {
|
||||
get_check_permissions::<Alerter>(
|
||||
&alerter.id,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect()
|
||||
};
|
||||
|
||||
if alerters.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"
|
||||
).status_code(StatusCode::BAD_REQUEST));
|
||||
}
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let ts = komodo_timestamp();
|
||||
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts,
|
||||
resolved: true,
|
||||
level: self.level,
|
||||
target: update.target.clone(),
|
||||
data: AlertData::Custom {
|
||||
message: self.message,
|
||||
details: self.details,
|
||||
},
|
||||
resolved_ts: Some(ts),
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"Send alert",
|
||||
serde_json::to_string_pretty(&alert)
|
||||
.context("Failed to serialize alert to JSON")?,
|
||||
);
|
||||
|
||||
if let Err(e) = alerters
|
||||
.iter()
|
||||
.map(|alerter| send_alert_to_alerter(alerter, &alert))
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
{
|
||||
update.push_error_log("Send Error", format_serror(&e.into()));
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,26 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::IntoFuture,
|
||||
time::Duration,
|
||||
};
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
|
||||
RunBuild,
|
||||
},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
all_logs_success,
|
||||
build::{Build, BuildConfig, ImageRegistryConfig},
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
update::{Log, Update},
|
||||
user::auto_redeploy_user,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
@@ -13,50 +28,26 @@ use database::mungos::{
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use futures_util::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{
|
||||
execute::{
|
||||
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
|
||||
RunBuild,
|
||||
},
|
||||
write::RefreshBuildCache,
|
||||
},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
all_logs_success,
|
||||
build::{Build, BuildConfig},
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
update::{Log, Update},
|
||||
user::auto_redeploy_user,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
api::write::WriteArgs,
|
||||
helpers::{
|
||||
build_git_token,
|
||||
builder::{cleanup_builder_instance, connect_builder_periphery},
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
channel::build_cancel_channel,
|
||||
query::{
|
||||
VariablesAndSecrets, get_deployment_state,
|
||||
get_variables_and_secrets,
|
||||
git_token,
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
interpolate_variables_secrets_into_system_command,
|
||||
},
|
||||
query::{get_deployment_state, get_variables_and_secrets},
|
||||
registry_token,
|
||||
update::{init_execution_update, update_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource::{self, refresh_build_state_cache},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
@@ -71,18 +62,10 @@ impl super::BatchExecute for BatchRunBuild {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunBuild {
|
||||
#[instrument(
|
||||
"BatchRunBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchRunBuild", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchRunBuild>(&self.pattern, user)
|
||||
@@ -92,47 +75,18 @@ impl Resolve<ExecuteArgs> for BatchRunBuild {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunBuild {
|
||||
#[instrument(
|
||||
"RunBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RunBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut repo = if !build.config.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&build.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let VariablesAndSecrets {
|
||||
mut variables,
|
||||
secrets,
|
||||
} = get_variables_and_secrets().await?;
|
||||
|
||||
// Add the $VERSION to variables. Use with [[$VERSION]]
|
||||
variables.insert(
|
||||
String::from("$VERSION"),
|
||||
build.config.version.to_string(),
|
||||
);
|
||||
let mut vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
if build.config.builder_id.is_empty() {
|
||||
return Err(anyhow!("Must attach builder to RunBuild").into());
|
||||
@@ -156,11 +110,26 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
update.version = build.config.version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token =
|
||||
build_git_token(&mut build, repo.as_mut()).await?;
|
||||
// Add the $VERSION to variables. Use with [[$VERSION]]
|
||||
if !vars_and_secrets.variables.contains_key("$VERSION") {
|
||||
vars_and_secrets.variables.insert(
|
||||
String::from("$VERSION"),
|
||||
build.config.version.to_string(),
|
||||
);
|
||||
}
|
||||
|
||||
let registry_tokens =
|
||||
validate_account_extract_registry_tokens(&build).await?;
|
||||
let git_token = git_token(
|
||||
&build.config.git_provider,
|
||||
&build.config.git_account,
|
||||
|https| build.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account),
|
||||
)?;
|
||||
|
||||
let registry_token =
|
||||
validate_account_extract_registry_token(&build).await?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
@@ -190,7 +159,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
update.finalize();
|
||||
let id = update.id.clone();
|
||||
if let Err(e) = update_update(update).await {
|
||||
warn!("Failed to modify Update {id} on db | {e:#}");
|
||||
warn!("failed to modify Update {id} on db | {e:#}");
|
||||
}
|
||||
if !is_server_builder {
|
||||
cancel_clone.cancel();
|
||||
@@ -208,7 +177,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
});
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
let (periphery, cleanup_data) = match connect_builder_periphery(
|
||||
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
build.name.clone(),
|
||||
Some(build.config.version),
|
||||
builder,
|
||||
@@ -219,12 +189,12 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
Ok(builder) => builder,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to get Builder for Build {} | {e:#}",
|
||||
"failed to get builder for build {} | {e:#}",
|
||||
build.name
|
||||
);
|
||||
update.logs.push(Log::error(
|
||||
"Get Builder",
|
||||
format_serror(&e.context("Failed to get Builder").into()),
|
||||
"get builder",
|
||||
format_serror(&e.context("failed to get builder").into()),
|
||||
));
|
||||
return handle_early_return(
|
||||
update, build.id, build.name, false,
|
||||
@@ -233,93 +203,124 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
}
|
||||
};
|
||||
|
||||
// INTERPOLATE VARIABLES
|
||||
// CLONE REPO
|
||||
let secret_replacers = if !build.config.skip_secret_interp {
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
// Interpolate variables / secrets into pre build command
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolator.interpolate_build(&mut build)?;
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.pre_build,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
if let Some(repo) = repo.as_mut() {
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
interpolator.push_logs(&mut update.logs);
|
||||
|
||||
interpolator.secret_replacers
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let commit_message = if !build.config.files_on_host
|
||||
&& (!build.config.repo.is_empty()
|
||||
|| !build.config.linked_repo.is_empty())
|
||||
{
|
||||
// PULL OR CLONE REPO
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::git::PullOrCloneRepo {
|
||||
args: repo.as_ref().map(Into::into).unwrap_or((&build).into()),
|
||||
git_token,
|
||||
environment: Default::default(),
|
||||
env_file_path: Default::default(),
|
||||
on_clone: None,
|
||||
on_pull: None,
|
||||
skip_secret_interp: Default::default(),
|
||||
replacers: Default::default(),
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("Build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("Build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("Builder cleaned up");
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
};
|
||||
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("Finished repo clone");
|
||||
update.logs.extend(res.res.logs);
|
||||
update.commit_hash =
|
||||
res.res.commit_hash.unwrap_or_default().to_string();
|
||||
res.res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed build at clone repo | {e:#}");
|
||||
update.push_error_log(
|
||||
"Clone Repo",
|
||||
format_serror(&e.context("Failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
};
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Some(commit_message)
|
||||
} else {
|
||||
None
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::git::CloneRepo {
|
||||
args: (&build).into(),
|
||||
git_token,
|
||||
environment: Default::default(),
|
||||
env_file_path: Default::default(),
|
||||
skip_secret_interp: Default::default(),
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
};
|
||||
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
update.logs.extend(res.logs);
|
||||
update.commit_hash =
|
||||
res.commit_hash.unwrap_or_default().to_string();
|
||||
res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed build at clone repo | {e:#}");
|
||||
update.push_error_log(
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
};
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if all_logs_success(&update.logs) {
|
||||
// RUN BUILD
|
||||
let secret_replacers = if !build.config.skip_secret_interp {
|
||||
// Interpolate variables / secrets into build args
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.build_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.secret_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
repo,
|
||||
registry_tokens,
|
||||
registry_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
// To push a commit hash tagged image
|
||||
commit_hash: optional_string(&update.commit_hash),
|
||||
// Unused for now
|
||||
additional_tags: Default::default(),
|
||||
}) => res.context("Failed at call to Periphery to build"),
|
||||
// Push a commit hash tagged image
|
||||
additional_tags: if update.commit_hash.is_empty() {
|
||||
Default::default()
|
||||
} else {
|
||||
vec![update.commit_hash.clone()]
|
||||
},
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("Build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("Build cancelled", String::from("User cancelled build during docker build"));
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
@@ -332,10 +333,10 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
update.logs.extend(logs);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Error in build | {e:#}");
|
||||
warn!("error in build | {e:#}");
|
||||
update.push_error_log(
|
||||
"Build Error",
|
||||
format_serror(&e.context("Failed to build").into()),
|
||||
"build",
|
||||
format_serror(&e.context("failed to build").into()),
|
||||
)
|
||||
}
|
||||
};
|
||||
@@ -364,8 +365,6 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
// stop the cancel listening task from going forever
|
||||
cancel.cancel();
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
@@ -377,7 +376,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -386,15 +385,13 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let Build { id, name, .. } = build;
|
||||
|
||||
if update.success {
|
||||
// don't hold response up for user
|
||||
tokio::spawn(async move {
|
||||
handle_post_build_redeploy(&id).await;
|
||||
handle_post_build_redeploy(&build.id).await;
|
||||
});
|
||||
} else {
|
||||
let name = name.clone();
|
||||
warn!("build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
let version = update.version;
|
||||
tokio::spawn(async move {
|
||||
@@ -405,27 +402,21 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::BuildFailed { id, name, version },
|
||||
data: AlertData::BuildFailed {
|
||||
id: build.id,
|
||||
name: build.name,
|
||||
version,
|
||||
},
|
||||
};
|
||||
send_alerts(&[alert]).await
|
||||
});
|
||||
}
|
||||
|
||||
if let Err(e) = (RefreshBuildCache { build: name })
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh build cache",
|
||||
format_serror(&e.error.into()),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(update.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("HandleEarlyReturn", skip(update))]
|
||||
#[instrument(skip(update))]
|
||||
async fn handle_early_return(
|
||||
mut update: Update,
|
||||
build_id: String,
|
||||
@@ -441,7 +432,7 @@ async fn handle_early_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -449,6 +440,7 @@ async fn handle_early_return(
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
let version = update.version;
|
||||
tokio::spawn(async move {
|
||||
@@ -518,24 +510,15 @@ pub async fn validate_cancel_build(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
#[instrument(
|
||||
"CancelBuild",
|
||||
skip(user, update),
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CancelBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let build = get_check_permissions::<Build>(
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -577,9 +560,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to set CancelBuild Update status Complete after timeout | {e:#}"
|
||||
)
|
||||
warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}")
|
||||
}
|
||||
});
|
||||
|
||||
@@ -587,7 +568,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("PostBuildRedeploy")]
|
||||
#[instrument]
|
||||
async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let Ok(redeploy_deployments) = find_collect(
|
||||
&db_client().deployments,
|
||||
@@ -606,9 +587,8 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
redeploy_deployments
|
||||
.into_iter()
|
||||
.map(|deployment| async move {
|
||||
let state = get_deployment_state(&deployment.id)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
let state =
|
||||
get_deployment_state(&deployment).await.unwrap_or_default();
|
||||
if state == DeploymentState::Running {
|
||||
let req = super::ExecuteRequest::Deploy(Deploy {
|
||||
deployment: deployment.id.clone(),
|
||||
@@ -623,11 +603,7 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
}
|
||||
.resolve(&ExecuteArgs {
|
||||
user,
|
||||
update,
|
||||
id: Uuid::new_v4(),
|
||||
})
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
}
|
||||
.await;
|
||||
@@ -653,49 +629,34 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token matching requirements.
|
||||
/// Otherwise it is left to periphery.
|
||||
#[instrument("ValidateRegistryTokens")]
|
||||
async fn validate_account_extract_registry_tokens(
|
||||
async fn validate_account_extract_registry_token(
|
||||
Build {
|
||||
config: BuildConfig { image_registry, .. },
|
||||
config:
|
||||
BuildConfig {
|
||||
image_registry:
|
||||
ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
},
|
||||
..
|
||||
},
|
||||
..
|
||||
}: &Build,
|
||||
// Maps (domain, account) -> token
|
||||
) -> serror::Result<Vec<(String, String, String)>> {
|
||||
let mut res = HashMap::with_capacity(image_registry.capacity());
|
||||
|
||||
for (domain, account) in image_registry
|
||||
.iter()
|
||||
.map(|r| (r.domain.as_str(), r.account.as_str()))
|
||||
// This ensures uniqueness / prevents redundant logins
|
||||
.collect::<HashSet<_>>()
|
||||
{
|
||||
if domain.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let Some(registry_token) = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)? else {
|
||||
continue;
|
||||
};
|
||||
|
||||
res.insert(
|
||||
(domain.to_string(), account.to_string()),
|
||||
registry_token,
|
||||
) -> serror::Result<Option<String>> {
|
||||
if domain.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(
|
||||
res
|
||||
.into_iter()
|
||||
.map(|((domain, account), token)| (domain, account, token))
|
||||
.collect(),
|
||||
)
|
||||
let registry_token = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?;
|
||||
|
||||
Ok(registry_token)
|
||||
}
|
||||
|
||||
@@ -1,22 +1,21 @@
|
||||
use std::sync::OnceLock;
|
||||
use std::{collections::HashSet, sync::OnceLock};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::{anyhow, Context};
|
||||
use cache::TimeoutCache;
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
Version,
|
||||
build::{Build, ImageRegistryConfig},
|
||||
deployment::{
|
||||
Deployment, DeploymentImage, extract_registry_domain,
|
||||
extract_registry_domain, Deployment, DeploymentImage,
|
||||
},
|
||||
komodo_timestamp, optional_string,
|
||||
get_image_name, komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
Version,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
@@ -24,13 +23,17 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
},
|
||||
periphery_client,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
query::get_variables_and_secrets,
|
||||
registry_token,
|
||||
update::update_update,
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::action_states,
|
||||
};
|
||||
@@ -49,18 +52,10 @@ impl super::BatchExecute for BatchDeploy {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDeploy {
|
||||
#[instrument(
|
||||
"BatchDeploy",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchDeploy", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDeploy>(&self.pattern, user)
|
||||
@@ -69,15 +64,14 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("SetupDeploy", skip_all)]
|
||||
async fn setup_deployment_execution(
|
||||
deployment: &str,
|
||||
user: &User,
|
||||
) -> anyhow::Result<(Deployment, Server)> {
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
deployment,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -96,21 +90,10 @@ async fn setup_deployment_execution(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for Deploy {
|
||||
#[instrument(
|
||||
"Deploy",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
stop_signal = format!("{:?}", self.stop_signal),
|
||||
stop_time = self.stop_time,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "Deploy", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -135,11 +118,8 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
let (version, registry_token) = match &deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(build_id).await?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version
|
||||
} else {
|
||||
@@ -156,27 +136,21 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version_str}"),
|
||||
};
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
(version, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = first_registry;
|
||||
} = build.config.image_registry;
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
deployment.config.image_registry_account =
|
||||
account.to_string();
|
||||
deployment.config.image_registry_account = account
|
||||
}
|
||||
let token = if !deployment
|
||||
.config
|
||||
.image_registry_account
|
||||
.is_empty()
|
||||
{
|
||||
registry_token(domain, &deployment.config.image_registry_account).await.with_context(
|
||||
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
|
||||
)?
|
||||
} else {
|
||||
@@ -205,17 +179,53 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers = if !deployment.config.skip_secret_interp {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolator
|
||||
.interpolate_deployment(&mut deployment)?
|
||||
.push_logs(&mut update.logs);
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolator.secret_replacers
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.ports,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.volumes,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.command,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
@@ -223,8 +233,7 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
update.version = version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
match periphery_client(&server)?
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal: self.stop_signal,
|
||||
@@ -243,7 +252,7 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
}
|
||||
};
|
||||
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -263,14 +272,6 @@ fn pull_cache() -> &'static PullCache {
|
||||
PULL_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"PullDeploymentInner",
|
||||
skip_all,
|
||||
fields(
|
||||
deployment = deployment.id,
|
||||
server = server.id
|
||||
)
|
||||
)]
|
||||
pub async fn pull_deployment_inner(
|
||||
deployment: Deployment,
|
||||
server: &Server,
|
||||
@@ -278,11 +279,8 @@ pub async fn pull_deployment_inner(
|
||||
let (image, account, token) = match deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version.to_string()
|
||||
} else {
|
||||
@@ -296,31 +294,26 @@ pub async fn pull_deployment_inner(
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
let image = format!("{image_name}:{version}");
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
(image, None, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = first_registry;
|
||||
} = build.config.image_registry;
|
||||
let account =
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
account
|
||||
} else {
|
||||
&deployment.config.image_registry_account
|
||||
deployment.config.image_registry_account
|
||||
};
|
||||
let token = if !account.is_empty() {
|
||||
registry_token(domain, account).await.with_context(
|
||||
registry_token(&domain, &account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(image, optional_string(account), token)
|
||||
(image, optional_string(&account), token)
|
||||
}
|
||||
}
|
||||
DeploymentImage::Image { image } => {
|
||||
@@ -360,9 +353,8 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
let res = async {
|
||||
let log = match periphery_client(server)
|
||||
.await?
|
||||
.request(api::docker::PullImage {
|
||||
let log = match periphery_client(server)?
|
||||
.request(api::image::PullImage {
|
||||
name: image,
|
||||
account,
|
||||
token,
|
||||
@@ -373,7 +365,7 @@ pub async fn pull_deployment_inner(
|
||||
Err(e) => Log::error("Pull image", format_serror(&e.into())),
|
||||
};
|
||||
|
||||
update_cache_for_server(server, true).await;
|
||||
update_cache_for_server(server).await;
|
||||
anyhow::Ok(log)
|
||||
}
|
||||
.await;
|
||||
@@ -386,19 +378,10 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullDeployment {
|
||||
#[instrument(
|
||||
"PullDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PullDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -429,19 +412,10 @@ impl Resolve<ExecuteArgs> for PullDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
#[instrument(
|
||||
"StartDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "StartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -462,8 +436,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -477,7 +450,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -486,19 +459,10 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
#[instrument(
|
||||
"RestartDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RestartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -519,8 +483,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::container::RestartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -536,7 +499,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -545,19 +508,10 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
#[instrument(
|
||||
"PauseDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -578,8 +532,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::container::PauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -593,7 +546,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -602,22 +555,13 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
#[instrument(
|
||||
"UnpauseDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UnpauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
setup_deployment_execution(&self.deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
@@ -635,8 +579,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::container::UnpauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -652,7 +595,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -661,24 +604,13 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
#[instrument(
|
||||
"StopDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "StopDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
setup_deployment_execution(&self.deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
@@ -696,8 +628,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::container::StopContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -719,7 +650,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -739,18 +670,10 @@ impl super::BatchExecute for BatchDestroyDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
|
||||
#[instrument(
|
||||
"BatchDestroyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchDestroyDeployment", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDestroyDeployment>(
|
||||
@@ -763,21 +686,10 @@ impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
#[instrument(
|
||||
"DestroyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DestroyDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -798,8 +710,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -822,7 +733,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
|
||||
@@ -1,588 +0,0 @@
|
||||
use std::{fmt::Write as _, sync::OnceLock};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::run_komodo_standard_command;
|
||||
use database::{
|
||||
bson::{Document, doc},
|
||||
mungos::find::find_collect,
|
||||
};
|
||||
use formatting::{bold, format_serror};
|
||||
use futures_util::{StreamExt, stream::FuturesOrdered};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
|
||||
RotateAllServerKeys, RotateCoreKeys,
|
||||
},
|
||||
entities::{
|
||||
deployment::DeploymentState, server::ServerState,
|
||||
stack::StackState,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
api::execute::{
|
||||
ExecuteArgs, pull_deployment_inner, pull_stack_inner,
|
||||
},
|
||||
config::{core_config, core_keys},
|
||||
helpers::{periphery_client, update::update_update},
|
||||
resource::rotate_server_keys,
|
||||
state::{
|
||||
db_client, deployment_status_cache, server_status_cache,
|
||||
stack_status_cache,
|
||||
},
|
||||
};
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn clear_repo_cache_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
#[instrument(
|
||||
"ClearRepoCache",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = clear_repo_cache_lock()
|
||||
.try_lock()
|
||||
.context("Clear already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let mut contents =
|
||||
tokio::fs::read_dir(&core_config().repo_directory)
|
||||
.await
|
||||
.context("Failed to read repo cache directory")?;
|
||||
|
||||
loop {
|
||||
let path = match contents
|
||||
.next_entry()
|
||||
.await
|
||||
.context("Failed to read contents at path")
|
||||
{
|
||||
Ok(Some(contents)) => contents.path(),
|
||||
Ok(None) => break,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Read Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if path.is_dir() {
|
||||
match tokio::fs::remove_dir_all(&path)
|
||||
.await
|
||||
.context("Failed to clear contents at path")
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Clear Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn backup_database_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BackupCoreDatabase {
|
||||
#[instrument(
|
||||
"BackupCoreDatabase",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = backup_database_lock()
|
||||
.try_lock()
|
||||
.context("Backup already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let res = run_komodo_standard_command(
|
||||
"Backup Core Database",
|
||||
None,
|
||||
"km database backup --yes",
|
||||
)
|
||||
.await;
|
||||
|
||||
update.logs.push(res);
|
||||
update.finalize();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn global_update_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
|
||||
#[instrument(
|
||||
"GlobalAutoUpdate",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_update_lock()
|
||||
.try_lock()
|
||||
.context("Global update already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
// This is all done in sequence because there is no rush,
|
||||
// the pulls / deploys happen spaced out to ease the load on system.
|
||||
let servers = find_collect(&db_client().servers, None, None)
|
||||
.await
|
||||
.context("Failed to query for servers from database")?;
|
||||
|
||||
let query = doc! {
|
||||
"$or": [
|
||||
{ "config.poll_for_updates": true },
|
||||
{ "config.auto_update": true }
|
||||
]
|
||||
};
|
||||
|
||||
let (stacks, repos) = tokio::try_join!(
|
||||
find_collect(&db_client().stacks, query.clone(), None),
|
||||
find_collect(&db_client().repos, None, None)
|
||||
)
|
||||
.context("Failed to query for resources from database")?;
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
let stack_status_cache = stack_status_cache();
|
||||
|
||||
// Will be edited later at update.logs[0]
|
||||
update.push_simple_log("Auto Pull", String::new());
|
||||
|
||||
for stack in stacks {
|
||||
let Some(status) = stack_status_cache.get(&stack.id).await
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
// Only pull running stacks.
|
||||
if !matches!(status.curr.state, StackState::Running) {
|
||||
continue;
|
||||
}
|
||||
if let Some(server) =
|
||||
servers.iter().find(|s| s.id == stack.config.server_id)
|
||||
// This check is probably redundant along with running check
|
||||
// but shouldn't hurt
|
||||
&& server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| matches!(s.state, ServerState::Ok))
|
||||
.unwrap_or_default()
|
||||
{
|
||||
let name = stack.name.clone();
|
||||
let repo = if stack.config.linked_repo.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let Some(repo) =
|
||||
repos.iter().find(|r| r.id == stack.config.linked_repo)
|
||||
else {
|
||||
update.push_error_log(
|
||||
&format!("Pull Stack {name}"),
|
||||
format!(
|
||||
"Did not find any Repo matching {}",
|
||||
stack.config.linked_repo
|
||||
),
|
||||
);
|
||||
continue;
|
||||
};
|
||||
Some(repo.clone())
|
||||
};
|
||||
if let Err(e) =
|
||||
pull_stack_inner(stack, Vec::new(), server, repo, None)
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
&format!("Pull Stack {name}"),
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
} else {
|
||||
if !update.logs[0].stdout.is_empty() {
|
||||
update.logs[0].stdout.push('\n');
|
||||
}
|
||||
update.logs[0]
|
||||
.stdout
|
||||
.push_str(&format!("Pulled Stack {} ✅", bold(name)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let deployment_status_cache = deployment_status_cache();
|
||||
let deployments =
|
||||
find_collect(&db_client().deployments, query, None)
|
||||
.await
|
||||
.context("Failed to query for deployments from database")?;
|
||||
for deployment in deployments {
|
||||
let Some(status) =
|
||||
deployment_status_cache.get(&deployment.id).await
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
// Only pull running deployments.
|
||||
if !matches!(status.curr.state, DeploymentState::Running) {
|
||||
continue;
|
||||
}
|
||||
if let Some(server) =
|
||||
servers.iter().find(|s| s.id == deployment.config.server_id)
|
||||
// This check is probably redundant along with running check
|
||||
// but shouldn't hurt
|
||||
&& server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| matches!(s.state, ServerState::Ok))
|
||||
.unwrap_or_default()
|
||||
{
|
||||
let name = deployment.name.clone();
|
||||
if let Err(e) =
|
||||
pull_deployment_inner(deployment, server).await
|
||||
{
|
||||
update.push_error_log(
|
||||
&format!("Pull Deployment {name}"),
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
} else {
|
||||
if !update.logs[0].stdout.is_empty() {
|
||||
update.logs[0].stdout.push('\n');
|
||||
}
|
||||
update.logs[0].stdout.push_str(&format!(
|
||||
"Pulled Deployment {} ✅",
|
||||
bold(name)
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn global_rotate_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
|
||||
#[instrument(
|
||||
"RotateAllServerKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Key rotation already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let mut servers = db_client()
|
||||
.servers
|
||||
.find(Document::new())
|
||||
.await
|
||||
.context("Failed to query servers from database")?;
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
|
||||
let mut log = String::new();
|
||||
|
||||
while let Some(server) = servers.next().await {
|
||||
let server = match server {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
warn!("Failed to parse Server | {e:#}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if !server.config.auto_rotate_keys {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Key Rotation Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let Some(status) = server_status_cache.get(&server.id).await
|
||||
else {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: No Status ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
};
|
||||
match status.state {
|
||||
ServerState::Disabled => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Not Ok ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
match rotate_server_keys(&server).await {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated keys for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate {} keys",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Server Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateCoreKeys {
|
||||
#[instrument(
|
||||
"RotateCoreKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
force = self.force,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Key rotation already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let core_keys = core_keys();
|
||||
|
||||
if !core_keys.rotatable() {
|
||||
return Err(anyhow!("Core `private_key` must be pointing to file, for example 'file:/config/keys/core.key'").into());
|
||||
};
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
let servers =
|
||||
find_collect(&db_client().servers, Document::new(), None)
|
||||
.await
|
||||
.context("Failed to query servers from database")?
|
||||
.into_iter()
|
||||
.map(|server| async move {
|
||||
let state = server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.state)
|
||||
.unwrap_or(ServerState::NotOk);
|
||||
(server, state)
|
||||
})
|
||||
.collect::<FuturesOrdered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
if !self.force
|
||||
&& let Some((server, _)) = servers
|
||||
.iter()
|
||||
.find(|(_, state)| matches!(state, ServerState::NotOk))
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Server {} is NotOk, stopping key rotation. Pass `force: true` to continue anyways.", server.name).into(),
|
||||
);
|
||||
}
|
||||
|
||||
let public_key = core_keys.rotate().await?.into_inner();
|
||||
|
||||
info!("New Public Key: {public_key}");
|
||||
|
||||
let mut log = format!("New Public Key: {public_key}\n");
|
||||
|
||||
for (server, state) in servers {
|
||||
match state {
|
||||
ServerState::Disabled => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
// Shouldn't be reached unless 'force: true'
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Not Ok ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let res = periphery
|
||||
.request(api::keys::RotateCorePublicKey {
|
||||
public_key: public_key.clone(),
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated key for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate for {}. The new Core public key will have to be added manually.",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Core Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
@@ -1,36 +1,31 @@
|
||||
use std::pin::Pin;
|
||||
use std::{pin::Pin, time::Instant};
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use axum_extra::{TypedHeader, headers::ContentType};
|
||||
use database::mungos::by_id::find_one_by_id;
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use formatting::format_serror;
|
||||
use futures_util::future::join_all;
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
Operation,
|
||||
permission::PermissionLevel,
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::by_id::find_one_by_id;
|
||||
use resolver_api::Resolve;
|
||||
use response::JsonString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use strum::Display;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
helpers::update::{init_execution_update, update_update},
|
||||
resource::{KomodoResource, list_full_for_user_using_pattern},
|
||||
resource::{list_full_for_user_using_pattern, KomodoResource},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
@@ -38,23 +33,18 @@ mod action;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod deployment;
|
||||
mod maintenance;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
|
||||
use super::Variant;
|
||||
|
||||
pub use {
|
||||
deployment::pull_deployment_inner, stack::pull_stack_inner,
|
||||
};
|
||||
|
||||
pub struct ExecuteArgs {
|
||||
/// The execution id.
|
||||
/// Unique for every /execute call.
|
||||
pub id: Uuid,
|
||||
pub user: User,
|
||||
pub update: Update,
|
||||
}
|
||||
@@ -63,7 +53,7 @@ pub struct ExecuteArgs {
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug, Display)]
|
||||
#[variant_derive(Debug)]
|
||||
#[args(ExecuteArgs)]
|
||||
#[response(JsonString)]
|
||||
#[error(serror::Error)]
|
||||
@@ -92,22 +82,6 @@ pub enum ExecuteRequest {
|
||||
PruneBuildx(PruneBuildx),
|
||||
PruneSystem(PruneSystem),
|
||||
|
||||
// ==== STACK ====
|
||||
DeployStack(DeployStack),
|
||||
BatchDeployStack(BatchDeployStack),
|
||||
DeployStackIfChanged(DeployStackIfChanged),
|
||||
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
|
||||
PullStack(PullStack),
|
||||
BatchPullStack(BatchPullStack),
|
||||
StartStack(StartStack),
|
||||
RestartStack(RestartStack),
|
||||
StopStack(StopStack),
|
||||
PauseStack(PauseStack),
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
RunStackService(RunStackService),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
Deploy(Deploy),
|
||||
BatchDeploy(BatchDeploy),
|
||||
@@ -120,6 +94,20 @@ pub enum ExecuteRequest {
|
||||
DestroyDeployment(DestroyDeployment),
|
||||
BatchDestroyDeployment(BatchDestroyDeployment),
|
||||
|
||||
// ==== STACK ====
|
||||
DeployStack(DeployStack),
|
||||
BatchDeployStack(BatchDeployStack),
|
||||
DeployStackIfChanged(DeployStackIfChanged),
|
||||
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
|
||||
PullStack(PullStack),
|
||||
StartStack(StartStack),
|
||||
RestartStack(RestartStack),
|
||||
StopStack(StopStack),
|
||||
PauseStack(PauseStack),
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
|
||||
// ==== BUILD ====
|
||||
RunBuild(RunBuild),
|
||||
BatchRunBuild(BatchRunBuild),
|
||||
@@ -142,40 +130,22 @@ pub enum ExecuteRequest {
|
||||
RunAction(RunAction),
|
||||
BatchRunAction(BatchRunAction),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
LaunchServer(LaunchServer),
|
||||
|
||||
// ==== ALERTER ====
|
||||
TestAlerter(TestAlerter),
|
||||
SendAlert(SendAlert),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
|
||||
// ==== MAINTENANCE ====
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
BackupCoreDatabase(BackupCoreDatabase),
|
||||
GlobalAutoUpdate(GlobalAutoUpdate),
|
||||
RotateAllServerKeys(RotateAllServerKeys),
|
||||
RotateCoreKeys(RotateCoreKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<(TypedHeader<ContentType>, String)> {
|
||||
let req: ExecuteRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteRequest>,
|
||||
@@ -188,11 +158,8 @@ async fn handler(
|
||||
Ok((TypedHeader(ContentType::json()), res))
|
||||
}
|
||||
|
||||
#[typeshare(serialized_as = "Update")]
|
||||
type BoxUpdate = Box<Update>;
|
||||
|
||||
pub enum ExecutionResult {
|
||||
Single(BoxUpdate),
|
||||
Single(Update),
|
||||
/// The batch contents will be pre serialized here
|
||||
Batch(String),
|
||||
}
|
||||
@@ -207,12 +174,10 @@ pub fn inner_handler(
|
||||
>,
|
||||
> {
|
||||
Box::pin(async move {
|
||||
let task_id = Uuid::new_v4();
|
||||
let req_id = Uuid::new_v4();
|
||||
|
||||
// Need to validate no cancel is active before any update is created.
|
||||
// This ensures no double update created if Cancel is called more than once for the same request.
|
||||
// need to validate no cancel is active before any update is created.
|
||||
build::validate_cancel_build(&request).await?;
|
||||
repo::validate_cancel_repo_build(&request).await?;
|
||||
|
||||
let update = init_execution_update(&request, &user).await?;
|
||||
|
||||
@@ -223,37 +188,28 @@ pub fn inner_handler(
|
||||
// here either.
|
||||
if update.operation == Operation::None {
|
||||
return Ok(ExecutionResult::Batch(
|
||||
task(task_id, request, user, update).await?,
|
||||
task(req_id, request, user, update).await?,
|
||||
));
|
||||
}
|
||||
|
||||
// Spawn a task for the execution which continues
|
||||
// running after this method returns.
|
||||
let handle =
|
||||
tokio::spawn(task(task_id, request, user, update.clone()));
|
||||
tokio::spawn(task(req_id, request, user, update.clone()));
|
||||
|
||||
// Spawns another task to monitor the first for failures,
|
||||
// and add the log to Update about it (which primary task can't do because it errored out)
|
||||
tokio::spawn({
|
||||
let update_id = update.id.clone();
|
||||
async move {
|
||||
let log = match handle.await {
|
||||
Ok(Err(e)) => {
|
||||
warn!("/execute request {task_id} task error: {e:#}",);
|
||||
Log::error("Task Error", format_serror(&e.into()))
|
||||
warn!("/execute request {req_id} task error: {e:#}",);
|
||||
Log::error("task error", format_serror(&e.into()))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("/execute request {task_id} spawn error: {e:?}",);
|
||||
Log::error("Spawn Error", format!("{e:#?}"))
|
||||
warn!("/execute request {req_id} spawn error: {e:?}",);
|
||||
Log::error("spawn error", format!("{e:#?}"))
|
||||
}
|
||||
_ => return,
|
||||
};
|
||||
let res = async {
|
||||
// Nothing to do if update was never actually created,
|
||||
// which is the case when the id is empty.
|
||||
if update_id.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let mut update =
|
||||
find_one_by_id(&db_client().updates, &update_id)
|
||||
.await
|
||||
@@ -273,37 +229,44 @@ pub fn inner_handler(
|
||||
}
|
||||
});
|
||||
|
||||
Ok(ExecutionResult::Single(update.into()))
|
||||
Ok(ExecutionResult::Single(update))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteRequest",
|
||||
skip(user, update),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
update_id = update.id,
|
||||
request = format!("{:?}", request.extract_variant()))
|
||||
)
|
||||
]
|
||||
async fn task(
|
||||
id: Uuid,
|
||||
req_id: Uuid,
|
||||
request: ExecuteRequest,
|
||||
user: User,
|
||||
update: Update,
|
||||
) -> anyhow::Result<String> {
|
||||
let variant = request.extract_variant();
|
||||
info!("/execute request {req_id} | user: {}", user.username);
|
||||
let timer = Instant::now();
|
||||
|
||||
info!(
|
||||
"/execute request {id} | {variant} | user: {}",
|
||||
user.username
|
||||
);
|
||||
|
||||
let res =
|
||||
match request.resolve(&ExecuteArgs { user, update, id }).await {
|
||||
Err(e) => Err(e.error),
|
||||
Ok(JsonString::Err(e)) => Err(
|
||||
anyhow::Error::from(e)
|
||||
.context("failed to serialize response"),
|
||||
),
|
||||
Ok(JsonString::Ok(res)) => Ok(res),
|
||||
};
|
||||
let res = match request.resolve(&ExecuteArgs { user, update }).await
|
||||
{
|
||||
Err(e) => Err(e.error),
|
||||
Ok(JsonString::Err(e)) => Err(
|
||||
anyhow::Error::from(e).context("failed to serialize response"),
|
||||
),
|
||||
Ok(JsonString::Ok(res)) => Ok(res),
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/execute request {id} error: {e:#}");
|
||||
warn!("/execute request {req_id} error: {e:#}");
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
@@ -312,7 +275,6 @@ trait BatchExecute {
|
||||
fn single_request(name: String) -> ExecuteRequest;
|
||||
}
|
||||
|
||||
#[instrument("BatchExecute", skip(user))]
|
||||
async fn batch_execute<E: BatchExecute>(
|
||||
pattern: &str,
|
||||
user: &User,
|
||||
@@ -321,11 +283,9 @@ async fn batch_execute<E: BatchExecute>(
|
||||
pattern,
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let futures = resources.into_iter().map(|resource| {
|
||||
let user = user.clone();
|
||||
async move {
|
||||
|
||||
@@ -1,30 +1,22 @@
|
||||
use std::pin::Pin;
|
||||
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use formatting::{Color, bold, colored, format_serror, muted};
|
||||
use formatting::{bold, colored, format_serror, muted, Color};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchRunProcedure, RunProcedure,
|
||||
},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
update::Update,
|
||||
user::User,
|
||||
permission::PermissionLevel, procedure::Procedure,
|
||||
update::Update, user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
helpers::{procedure::execute_procedure, update::update_update},
|
||||
permission::get_check_permissions,
|
||||
resource::refresh_procedure_state_cache,
|
||||
resource::{self, refresh_procedure_state_cache},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
@@ -38,11 +30,7 @@ impl super::BatchExecute for BatchRunProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunProcedure {
|
||||
#[instrument(
|
||||
"BatchRunProcedure",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
#[instrument(name = "BatchRunProcedure", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
@@ -55,19 +43,10 @@ impl Resolve<ExecuteArgs> for BatchRunProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunProcedure {
|
||||
#[instrument(
|
||||
"RunProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
procedure = self.procedure,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RunProcedure", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(
|
||||
resolve_inner(self.procedure, user.clone(), update.clone())
|
||||
@@ -86,10 +65,10 @@ fn resolve_inner(
|
||||
>,
|
||||
> {
|
||||
Box::pin(async move {
|
||||
let procedure = get_check_permissions::<Procedure>(
|
||||
let procedure = resource::get_check_permissions::<Procedure>(
|
||||
&procedure,
|
||||
&user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -149,7 +128,7 @@ fn resolve_inner(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -158,25 +137,6 @@ fn resolve_inner(
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success && procedure.config.failure_alert {
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
target,
|
||||
ts: komodo_timestamp(),
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::ProcedureFailed {
|
||||
id: procedure.id,
|
||||
name: procedure.name,
|
||||
},
|
||||
};
|
||||
send_alerts(&[alert]).await
|
||||
});
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,15 +1,7 @@
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{execute::*, write::RefreshRepoCache},
|
||||
entities::{
|
||||
@@ -22,6 +14,13 @@ use komodo_client::{
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
@@ -30,13 +29,18 @@ use crate::{
|
||||
alert::send_alerts,
|
||||
api::write::WriteArgs,
|
||||
helpers::{
|
||||
builder::{cleanup_builder_instance, connect_builder_periphery},
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
channel::repo_cancel_channel,
|
||||
git_token, periphery_client,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
git_token,
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_string,
|
||||
interpolate_variables_secrets_into_system_command,
|
||||
},
|
||||
periphery_client,
|
||||
query::get_variables_and_secrets,
|
||||
update::update_update,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource::{self, refresh_repo_state_cache},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
@@ -51,18 +55,10 @@ impl super::BatchExecute for BatchCloneRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchCloneRepo {
|
||||
#[instrument(
|
||||
"BatchCloneRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchCloneRepo", skip( user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchCloneRepo>(&self.pattern, user)
|
||||
@@ -72,24 +68,15 @@ impl Resolve<ExecuteArgs> for BatchCloneRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
#[instrument(
|
||||
"CloneRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CloneRepo", skip( user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -122,7 +109,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -135,18 +122,16 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
on_clone: repo.config.on_clone.into(),
|
||||
on_pull: repo.config.on_pull.into(),
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(res) => res.res.logs,
|
||||
Ok(res) => res.logs,
|
||||
Err(e) => {
|
||||
vec![Log::error(
|
||||
"Clone Repo",
|
||||
format_serror(&e.context("Failed to clone repo").into()),
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
)]
|
||||
}
|
||||
};
|
||||
@@ -170,57 +155,40 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
);
|
||||
};
|
||||
|
||||
handle_repo_update_return(update).await
|
||||
handle_server_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
impl super::BatchExecute for BatchPullRepo {
|
||||
type Resource = Repo;
|
||||
fn single_request(repo: String) -> ExecuteRequest {
|
||||
ExecuteRequest::PullRepo(PullRepo { repo })
|
||||
ExecuteRequest::CloneRepo(CloneRepo { repo })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchPullRepo {
|
||||
#[instrument(
|
||||
"BatchPullRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchPullRepo", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
|
||||
super::batch_execute::<BatchPullRepo>(&self.pattern, &user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullRepo {
|
||||
#[instrument(
|
||||
"PullRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PullRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -254,7 +222,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -267,15 +235,14 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
on_pull: repo.config.on_pull.into(),
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(res) => {
|
||||
update.commit_hash = res.res.commit_hash.unwrap_or_default();
|
||||
res.res.logs
|
||||
update.commit_hash = res.commit_hash.unwrap_or_default();
|
||||
res.logs
|
||||
}
|
||||
Err(e) => {
|
||||
vec![Log::error(
|
||||
@@ -305,16 +272,12 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
);
|
||||
};
|
||||
|
||||
handle_repo_update_return(update).await
|
||||
handle_server_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"HandleRepoEarlyReturn",
|
||||
skip_all,
|
||||
fields(update_id = update.id)
|
||||
)]
|
||||
async fn handle_repo_update_return(
|
||||
#[instrument(skip_all, fields(update_id = update.id))]
|
||||
async fn handle_server_update_return(
|
||||
update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
// Need to manually update the update before cache refresh,
|
||||
@@ -325,7 +288,7 @@ async fn handle_repo_update_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -335,7 +298,7 @@ async fn handle_repo_update_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument("UpdateLastPulledTime")]
|
||||
#[instrument]
|
||||
async fn update_last_pulled_time(repo_name: &str) {
|
||||
let res = db_client()
|
||||
.repos
|
||||
@@ -359,18 +322,10 @@ impl super::BatchExecute for BatchBuildRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchBuildRepo {
|
||||
#[instrument(
|
||||
"BatchBuildRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BatchBuildRepo", skip(user), fields(user_id = user.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchBuildRepo>(&self.pattern, user)
|
||||
@@ -380,24 +335,15 @@ impl Resolve<ExecuteArgs> for BatchBuildRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
#[instrument(
|
||||
"BuildRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "BuildRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -474,7 +420,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) = match connect_builder_periphery(
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
repo.name.clone(),
|
||||
None,
|
||||
builder,
|
||||
@@ -492,7 +438,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
return handle_builder_early_return(
|
||||
update, repo.id, repo.name, false,
|
||||
)
|
||||
.await;
|
||||
.await
|
||||
.map_err(Into::into);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -510,8 +457,6 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
on_clone: repo.config.on_clone.into(),
|
||||
on_pull: repo.config.on_pull.into(),
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect()
|
||||
}) => res,
|
||||
@@ -528,15 +473,14 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
update.logs.extend(res.res.logs);
|
||||
update.commit_hash = res.res.commit_hash.unwrap_or_default();
|
||||
|
||||
res.res.commit_message.unwrap_or_default()
|
||||
update.logs.extend(res.logs);
|
||||
update.commit_hash = res.commit_hash.unwrap_or_default();
|
||||
res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Clone Repo",
|
||||
format_serror(&e.context("Failed to clone repo").into()),
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
@@ -563,8 +507,6 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
// stop the cancel listening task from going forever
|
||||
cancel.cancel();
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
@@ -576,7 +518,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -586,6 +528,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -608,7 +551,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("HandleRepoBuildEarlyReturn", skip(update))]
|
||||
#[instrument(skip(update))]
|
||||
async fn handle_builder_early_return(
|
||||
mut update: Update,
|
||||
repo_id: String,
|
||||
@@ -624,7 +567,7 @@ async fn handle_builder_early_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -632,6 +575,7 @@ async fn handle_builder_early_return(
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -652,6 +596,7 @@ async fn handle_builder_early_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn validate_cancel_repo_build(
|
||||
request: &ExecuteRequest,
|
||||
) -> anyhow::Result<()> {
|
||||
@@ -701,24 +646,15 @@ pub async fn validate_cancel_repo_build(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
#[instrument(
|
||||
"CancelRepoBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CancelRepoBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -760,9 +696,7 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to set CancelRepoBuild Update status Complete after timeout | {e:#}"
|
||||
)
|
||||
warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}")
|
||||
}
|
||||
});
|
||||
|
||||
@@ -770,29 +704,44 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"Interpolate",
|
||||
skip_all,
|
||||
fields(
|
||||
skip_secret_interp = repo.config.skip_secret_interp
|
||||
)
|
||||
)]
|
||||
async fn interpolate(
|
||||
repo: &mut Repo,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<HashSet<(String, String)>> {
|
||||
if !repo.config.skip_secret_interp {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolator
|
||||
.interpolate_repo(repo)?
|
||||
.push_logs(&mut update.logs);
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
Ok(interpolator.secret_replacers)
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.on_clone,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.on_pull,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
Ok(secret_replacers)
|
||||
} else {
|
||||
Ok(Default::default())
|
||||
}
|
||||
|
||||
@@ -15,32 +15,22 @@ use resolver_api::Resolve;
|
||||
use crate::{
|
||||
helpers::{periphery_client, update::update_update},
|
||||
monitor::update_cache_for_server,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::action_states,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartContainer {
|
||||
#[instrument(
|
||||
"StartContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -60,7 +50,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
@@ -76,7 +66,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -86,25 +76,15 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
#[instrument(
|
||||
"RestartContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -124,7 +104,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RestartContainer {
|
||||
@@ -142,7 +122,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -152,25 +132,15 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
#[instrument(
|
||||
"PauseContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -190,7 +160,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PauseContainer {
|
||||
@@ -206,7 +176,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -216,25 +186,15 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
#[instrument(
|
||||
"UnpauseContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UnpauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -254,7 +214,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::UnpauseContainer {
|
||||
@@ -272,7 +232,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -282,27 +242,15 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopContainer {
|
||||
#[instrument(
|
||||
"StopContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "StopContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -322,7 +270,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StopContainer {
|
||||
@@ -340,7 +288,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -350,22 +298,10 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
#[instrument(
|
||||
"DestroyContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DestroyContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let DestroyContainer {
|
||||
server,
|
||||
@@ -373,10 +309,10 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
signal,
|
||||
time,
|
||||
} = self;
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -396,7 +332,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
@@ -414,7 +350,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -424,24 +360,15 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
#[instrument(
|
||||
"StartAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "StartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -460,8 +387,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
let logs = periphery_client(&server)?
|
||||
.request(api::container::StartAllContainers {})
|
||||
.await
|
||||
.context("failed to start all containers on host")?;
|
||||
@@ -475,7 +401,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -484,24 +410,15 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
#[instrument(
|
||||
"RestartAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RestartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -520,8 +437,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
let logs = periphery_client(&server)?
|
||||
.request(api::container::RestartAllContainers {})
|
||||
.await
|
||||
.context("failed to restart all containers on host")?;
|
||||
@@ -537,7 +453,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -546,24 +462,15 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
#[instrument(
|
||||
"PauseAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -582,8 +489,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
let logs = periphery_client(&server)?
|
||||
.request(api::container::PauseAllContainers {})
|
||||
.await
|
||||
.context("failed to pause all containers on host")?;
|
||||
@@ -597,7 +503,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -606,24 +512,15 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
#[instrument(
|
||||
"UnpauseAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UnpauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -642,8 +539,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
let logs = periphery_client(&server)?
|
||||
.request(api::container::UnpauseAllContainers {})
|
||||
.await
|
||||
.context("failed to unpause all containers on host")?;
|
||||
@@ -659,7 +555,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -668,24 +564,15 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
#[instrument(
|
||||
"StopAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "StopAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -704,8 +591,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
let logs = periphery_client(&server)?
|
||||
.request(api::container::StopAllContainers {})
|
||||
.await
|
||||
.context("failed to stop all containers on host")?;
|
||||
@@ -719,7 +605,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -728,24 +614,15 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
#[instrument(
|
||||
"PruneContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -764,7 +641,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PruneContainers {})
|
||||
@@ -783,7 +660,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -793,25 +670,15 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
#[instrument(
|
||||
"DeleteNetwork",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
network = self.name
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteNetwork", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -819,10 +686,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::docker::DeleteNetwork {
|
||||
.request(api::network::DeleteNetwork {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -844,7 +711,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -854,24 +721,15 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
#[instrument(
|
||||
"PruneNetworks",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneNetworks", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -890,10 +748,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::docker::PruneNetworks {})
|
||||
.request(api::network::PruneNetworks {})
|
||||
.await
|
||||
.context(format!(
|
||||
"failed to prune networks on server {}",
|
||||
@@ -907,7 +765,7 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -917,25 +775,15 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
#[instrument(
|
||||
"DeleteImage",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
image = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteImage", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -943,10 +791,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::docker::DeleteImage {
|
||||
.request(api::image::DeleteImage {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -965,7 +813,7 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -975,24 +823,15 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneImages {
|
||||
#[instrument(
|
||||
"PruneImages",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneImages", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1011,10 +850,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::docker::PruneImages {}).await {
|
||||
match periphery.request(api::image::PruneImages {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune images",
|
||||
@@ -1026,7 +865,7 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1036,25 +875,15 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
#[instrument(
|
||||
"DeleteVolume",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
volume = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteVolume", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1062,10 +891,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::docker::DeleteVolume {
|
||||
.request(api::volume::DeleteVolume {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -1087,7 +916,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1097,24 +926,15 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
#[instrument(
|
||||
"PruneVolumes",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneVolumes", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1133,10 +953,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::docker::PruneVolumes {}).await {
|
||||
match periphery.request(api::volume::PruneVolumes {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune volumes",
|
||||
@@ -1148,7 +968,7 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1158,24 +978,15 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
#[instrument(
|
||||
"PruneDockerBuilders",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneDockerBuilders", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1194,7 +1005,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuilders {}).await {
|
||||
@@ -1209,7 +1020,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1219,24 +1030,15 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
#[instrument(
|
||||
"PruneBuildx",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneBuildx", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1255,7 +1057,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuildx {}).await {
|
||||
@@ -1270,7 +1072,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1280,24 +1082,15 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
#[instrument(
|
||||
"PruneSystem",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "PruneSystem", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1316,7 +1109,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery.request(api::PruneSystem {}).await {
|
||||
Ok(log) => log,
|
||||
@@ -1330,7 +1123,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
156
bin/core/src/api/execute/server_template.rs
Normal file
156
bin/core/src/api/execute/server_template.rs
Normal file
@@ -0,0 +1,156 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::{execute::LaunchServer, write::CreateServer},
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
server::PartialServerConfig,
|
||||
server_template::{ServerTemplate, ServerTemplateConfig},
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
cloud::{
|
||||
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
|
||||
},
|
||||
helpers::update::update_update,
|
||||
resource,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for LaunchServer {
|
||||
#[instrument(name = "LaunchServer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
// validate name isn't already taken by another server
|
||||
if db_client()
|
||||
.servers
|
||||
.find_one(doc! {
|
||||
"name": &self.name
|
||||
})
|
||||
.await
|
||||
.context("failed to query db for servers")?
|
||||
.is_some()
|
||||
{
|
||||
return Err(anyhow!("name is already taken").into());
|
||||
}
|
||||
|
||||
let template = resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.server_template,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update.push_simple_log(
|
||||
"launching server",
|
||||
format!("{:#?}", template.config),
|
||||
);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let config = match template.config {
|
||||
ServerTemplateConfig::Aws(config) => {
|
||||
let region = config.region.clone();
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let instance =
|
||||
match launch_ec2_instance(&self.name, config).await {
|
||||
Ok(instance) => instance,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch aws instance\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {} on ip {}",
|
||||
self.name, instance.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("{protocol}://{}:{port}", instance.ip)
|
||||
.into(),
|
||||
region: region.into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
ServerTemplateConfig::Hetzner(config) => {
|
||||
let datacenter = config.datacenter;
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let server =
|
||||
match launch_hetzner_server(&self.name, config).await {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch hetzner server\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {} on ip {}",
|
||||
self.name, server.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("{protocol}://{}:{port}", server.ip)
|
||||
.into(),
|
||||
region: datacenter.as_ref().to_string().into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match (CreateServer {
|
||||
name: self.name,
|
||||
config,
|
||||
})
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await
|
||||
{
|
||||
Ok(server) => {
|
||||
update.push_simple_log(
|
||||
"create server",
|
||||
format!("created server {} ({})", server.name, server.id),
|
||||
);
|
||||
update.other_data = server.id;
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"create server",
|
||||
format_serror(
|
||||
&e.error.context("failed to create server").into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,15 +1,11 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use formatting::{Color, colored, format_serror};
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::{colored, format_serror, Color};
|
||||
use komodo_client::{
|
||||
api::{execute::RunSync, write::RefreshResourceSyncPending},
|
||||
entities::{
|
||||
self, ResourceTargetVariant,
|
||||
self,
|
||||
action::Action,
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
@@ -20,76 +16,59 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Log, Update},
|
||||
user::sync_user,
|
||||
ResourceTargetVariant,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{oid::ObjectId, to_document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
helpers::{
|
||||
all_resources::AllResourcesById, query::get_id_to_tags,
|
||||
update::update_update,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
helpers::{query::get_id_to_tags, update::update_update},
|
||||
resource::{self, refresh_resource_sync_state_cache},
|
||||
state::{action_states, db_client},
|
||||
sync::{
|
||||
ResourceSyncTrait,
|
||||
deploy::{
|
||||
SyncDeployParams, build_deploy_cache, deploy_from_cache,
|
||||
build_deploy_cache, deploy_from_cache, SyncDeployParams,
|
||||
},
|
||||
execute::{ExecuteResourceSync, get_updates_for_execution},
|
||||
execute::{get_updates_for_execution, ExecuteResourceSync},
|
||||
remote::RemoteResources,
|
||||
AllResourcesById, ResourceSyncTrait,
|
||||
},
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunSync {
|
||||
#[instrument(
|
||||
"RunSync",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
sync = self.sync,
|
||||
resource_type = format!("{:?}", self.resource_type),
|
||||
resources = format!("{:?}", self.resources),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RunSync", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let RunSync {
|
||||
sync,
|
||||
resource_type: match_resource_type,
|
||||
resources: match_resources,
|
||||
} = self;
|
||||
let sync = get_check_permissions::<entities::sync::ResourceSync>(
|
||||
&sync,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
let sync = resource::get_check_permissions::<
|
||||
entities::sync::ResourceSync,
|
||||
>(&sync, &user, PermissionLevel::Execute)
|
||||
.await?;
|
||||
|
||||
let repo = if !sync.config.files_on_host
|
||||
&& !sync.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&sync.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// get the action state for the sync (or insert default).
|
||||
let action_state =
|
||||
action_states().sync.get_or_insert_default(&sync.id).await;
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.get_or_insert_default(&sync.id)
|
||||
.await;
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure sync not already busy before updating.
|
||||
@@ -108,10 +87,9 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
message,
|
||||
file_errors,
|
||||
..
|
||||
} =
|
||||
crate::sync::remote::get_remote_resources(&sync, repo.as_ref())
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
} = crate::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
|
||||
update.logs.extend(logs);
|
||||
update_update(update.clone()).await?;
|
||||
@@ -168,6 +146,10 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
.servers
|
||||
.get(&name_or_id)
|
||||
.map(|s| s.name.clone()),
|
||||
ResourceTargetVariant::ServerTemplate => all_resources
|
||||
.templates
|
||||
.get(&name_or_id)
|
||||
.map(|t| t.name.clone()),
|
||||
ResourceTargetVariant::Stack => all_resources
|
||||
.stacks
|
||||
.get(&name_or_id)
|
||||
@@ -222,141 +204,145 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
deployment_map: &deployments_by_name,
|
||||
stacks: &resources.stacks,
|
||||
stack_map: &stacks_by_name,
|
||||
all_resources: &all_resources,
|
||||
})
|
||||
.await?;
|
||||
|
||||
let delete = sync.config.managed || sync.config.delete;
|
||||
|
||||
let server_deltas = if sync.config.include_resources {
|
||||
let (servers_to_create, servers_to_update, servers_to_delete) =
|
||||
get_updates_for_execution::<Server>(
|
||||
resources.servers,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let stack_deltas = if sync.config.include_resources {
|
||||
.await?;
|
||||
let (
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
) = get_updates_for_execution::<Deployment>(
|
||||
resources.deployments,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (stacks_to_create, stacks_to_update, stacks_to_delete) =
|
||||
get_updates_for_execution::<Stack>(
|
||||
resources.stacks,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let deployment_deltas = if sync.config.include_resources {
|
||||
get_updates_for_execution::<Deployment>(
|
||||
resources.deployments,
|
||||
delete,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let build_deltas = if sync.config.include_resources {
|
||||
.await?;
|
||||
let (builds_to_create, builds_to_update, builds_to_delete) =
|
||||
get_updates_for_execution::<Build>(
|
||||
resources.builds,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let repo_deltas = if sync.config.include_resources {
|
||||
.await?;
|
||||
let (repos_to_create, repos_to_update, repos_to_delete) =
|
||||
get_updates_for_execution::<Repo>(
|
||||
resources.repos,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let procedure_deltas = if sync.config.include_resources {
|
||||
get_updates_for_execution::<Procedure>(
|
||||
resources.procedures,
|
||||
delete,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let action_deltas = if sync.config.include_resources {
|
||||
.await?;
|
||||
let (
|
||||
procedures_to_create,
|
||||
procedures_to_update,
|
||||
procedures_to_delete,
|
||||
) = get_updates_for_execution::<Procedure>(
|
||||
resources.procedures,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (actions_to_create, actions_to_update, actions_to_delete) =
|
||||
get_updates_for_execution::<Action>(
|
||||
resources.actions,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let builder_deltas = if sync.config.include_resources {
|
||||
.await?;
|
||||
let (builders_to_create, builders_to_update, builders_to_delete) =
|
||||
get_updates_for_execution::<Builder>(
|
||||
resources.builders,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let alerter_deltas = if sync.config.include_resources {
|
||||
.await?;
|
||||
let (alerters_to_create, alerters_to_update, alerters_to_delete) =
|
||||
get_updates_for_execution::<Alerter>(
|
||||
resources.alerters,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let resource_sync_deltas = if sync.config.include_resources {
|
||||
get_updates_for_execution::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
delete,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
.await?;
|
||||
let (
|
||||
server_templates_to_create,
|
||||
server_templates_to_update,
|
||||
server_templates_to_delete,
|
||||
) = get_updates_for_execution::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
resource_syncs_to_create,
|
||||
resource_syncs_to_update,
|
||||
resource_syncs_to_delete,
|
||||
) = get_updates_for_execution::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let (
|
||||
variables_to_create,
|
||||
@@ -364,11 +350,12 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
variables_to_delete,
|
||||
) = if match_resource_type.is_none()
|
||||
&& match_resources.is_none()
|
||||
&& sync.config.include_variables
|
||||
&& sync.config.match_tags.is_empty()
|
||||
{
|
||||
crate::sync::variables::get_updates_for_execution(
|
||||
resources.variables,
|
||||
delete,
|
||||
// Delete doesn't work with variables when match tags are set
|
||||
sync.config.match_tags.is_empty() && delete,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
@@ -380,11 +367,13 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
user_groups_to_delete,
|
||||
) = if match_resource_type.is_none()
|
||||
&& match_resources.is_none()
|
||||
&& sync.config.include_user_groups
|
||||
&& sync.config.match_tags.is_empty()
|
||||
{
|
||||
crate::sync::user_groups::get_updates_for_execution(
|
||||
resources.user_groups,
|
||||
delete,
|
||||
// Delete doesn't work with user groups when match tags are set
|
||||
sync.config.match_tags.is_empty() && delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
@@ -392,16 +381,39 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
};
|
||||
|
||||
if deploy_cache.is_empty()
|
||||
&& resource_sync_deltas.no_changes()
|
||||
&& server_deltas.no_changes()
|
||||
&& deployment_deltas.no_changes()
|
||||
&& stack_deltas.no_changes()
|
||||
&& build_deltas.no_changes()
|
||||
&& builder_deltas.no_changes()
|
||||
&& alerter_deltas.no_changes()
|
||||
&& repo_deltas.no_changes()
|
||||
&& procedure_deltas.no_changes()
|
||||
&& action_deltas.no_changes()
|
||||
&& resource_syncs_to_create.is_empty()
|
||||
&& resource_syncs_to_update.is_empty()
|
||||
&& resource_syncs_to_delete.is_empty()
|
||||
&& server_templates_to_create.is_empty()
|
||||
&& server_templates_to_update.is_empty()
|
||||
&& server_templates_to_delete.is_empty()
|
||||
&& servers_to_create.is_empty()
|
||||
&& servers_to_update.is_empty()
|
||||
&& servers_to_delete.is_empty()
|
||||
&& deployments_to_create.is_empty()
|
||||
&& deployments_to_update.is_empty()
|
||||
&& deployments_to_delete.is_empty()
|
||||
&& stacks_to_create.is_empty()
|
||||
&& stacks_to_update.is_empty()
|
||||
&& stacks_to_delete.is_empty()
|
||||
&& builds_to_create.is_empty()
|
||||
&& builds_to_update.is_empty()
|
||||
&& builds_to_delete.is_empty()
|
||||
&& builders_to_create.is_empty()
|
||||
&& builders_to_update.is_empty()
|
||||
&& builders_to_delete.is_empty()
|
||||
&& alerters_to_create.is_empty()
|
||||
&& alerters_to_update.is_empty()
|
||||
&& alerters_to_delete.is_empty()
|
||||
&& repos_to_create.is_empty()
|
||||
&& repos_to_update.is_empty()
|
||||
&& repos_to_delete.is_empty()
|
||||
&& procedures_to_create.is_empty()
|
||||
&& procedures_to_update.is_empty()
|
||||
&& procedures_to_delete.is_empty()
|
||||
&& actions_to_create.is_empty()
|
||||
&& actions_to_update.is_empty()
|
||||
&& actions_to_delete.is_empty()
|
||||
&& user_groups_to_create.is_empty()
|
||||
&& user_groups_to_update.is_empty()
|
||||
&& user_groups_to_delete.is_empty()
|
||||
@@ -444,52 +456,111 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
ResourceSync::execute_sync_updates(resource_sync_deltas).await,
|
||||
ResourceSync::execute_sync_updates(
|
||||
resource_syncs_to_create,
|
||||
resource_syncs_to_update,
|
||||
resource_syncs_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Server::execute_sync_updates(server_deltas).await,
|
||||
ServerTemplate::execute_sync_updates(
|
||||
server_templates_to_create,
|
||||
server_templates_to_update,
|
||||
server_templates_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Alerter::execute_sync_updates(alerter_deltas).await,
|
||||
Server::execute_sync_updates(
|
||||
servers_to_create,
|
||||
servers_to_update,
|
||||
servers_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Action::execute_sync_updates(action_deltas).await,
|
||||
Alerter::execute_sync_updates(
|
||||
alerters_to_create,
|
||||
alerters_to_update,
|
||||
alerters_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Action::execute_sync_updates(
|
||||
actions_to_create,
|
||||
actions_to_update,
|
||||
actions_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependent on server
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Builder::execute_sync_updates(builder_deltas).await,
|
||||
Builder::execute_sync_updates(
|
||||
builders_to_create,
|
||||
builders_to_update,
|
||||
builders_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Repo::execute_sync_updates(repo_deltas).await,
|
||||
Repo::execute_sync_updates(
|
||||
repos_to_create,
|
||||
repos_to_update,
|
||||
repos_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on builder
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Build::execute_sync_updates(build_deltas).await,
|
||||
Build::execute_sync_updates(
|
||||
builds_to_create,
|
||||
builds_to_update,
|
||||
builds_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on server / build
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Deployment::execute_sync_updates(deployment_deltas).await,
|
||||
Deployment::execute_sync_updates(
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
// stack only depends on server, but maybe will depend on build later.
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Stack::execute_sync_updates(stack_deltas).await,
|
||||
Stack::execute_sync_updates(
|
||||
stacks_to_create,
|
||||
stacks_to_update,
|
||||
stacks_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on everything
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Procedure::execute_sync_updates(procedure_deltas).await,
|
||||
Procedure::execute_sync_updates(
|
||||
procedures_to_create,
|
||||
procedures_to_update,
|
||||
procedures_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Execute the deploy cache
|
||||
@@ -538,6 +609,21 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_resource_sync_state_cache().await;
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
|
||||
@@ -1,11 +1,5 @@
|
||||
pub mod auth;
|
||||
pub mod execute;
|
||||
pub mod read;
|
||||
pub mod terminal;
|
||||
pub mod user;
|
||||
pub mod write;
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct Variant {
|
||||
variant: String,
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_state_cache, action_states},
|
||||
};
|
||||
@@ -25,10 +24,10 @@ impl Resolve<ReadArgs> for GetAction {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(
|
||||
get_check_permissions::<Action>(
|
||||
resource::get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -46,13 +45,8 @@ impl Resolve<ReadArgs> for ListActions {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Action>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
resource::list_for_user::<Action>(self.query, &user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -69,10 +63,7 @@ impl Resolve<ReadArgs> for ListFullActions {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Action>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -84,10 +75,10 @@ impl Resolve<ReadArgs> for GetActionActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ActionActionState> {
|
||||
let action = get_check_permissions::<Action>(
|
||||
let action = resource::get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -107,8 +98,7 @@ impl Resolve<ReadArgs> for GetActionsSummary {
|
||||
) -> serror::Result<GetActionsSummaryResponse> {
|
||||
let actions = resource::list_full_for_user::<Action>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -131,8 +121,8 @@ impl Resolve<ReadArgs> for GetActionsSummary {
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.running > 0 => {
|
||||
res.running += action_states.running;
|
||||
(_, action_states) if action_states.running => {
|
||||
res.running += 1;
|
||||
}
|
||||
(ActionState::Ok, _) => res.ok += 1,
|
||||
(ActionState::Failed, _) => res.failed += 1,
|
||||
|
||||
@@ -1,22 +1,22 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
},
|
||||
entities::{
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
server::Server, stack::Stack, sync::ResourceSync,
|
||||
deployment::Deployment, server::Server, stack::Stack,
|
||||
sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config, permission::list_resource_ids_for_user,
|
||||
config::core_config, resource::get_resource_ids_for_user,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
@@ -31,29 +31,14 @@ impl Resolve<ReadArgs> for ListAlerts {
|
||||
) -> serror::Result<ListAlertsResponse> {
|
||||
let mut query = self.query.unwrap_or_default();
|
||||
if !user.admin && !core_config().transparent_mode {
|
||||
let (server_ids, stack_ids, deployment_ids, sync_ids) = tokio::try_join!(
|
||||
list_resource_ids_for_user::<Server>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
),
|
||||
list_resource_ids_for_user::<Stack>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
),
|
||||
list_resource_ids_for_user::<Deployment>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
),
|
||||
list_resource_ids_for_user::<ResourceSync>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
)?;
|
||||
// All of the vecs will be non-none if !admin and !transparent mode.
|
||||
let server_ids =
|
||||
get_resource_ids_for_user::<Server>(user).await?;
|
||||
let stack_ids =
|
||||
get_resource_ids_for_user::<Stack>(user).await?;
|
||||
let deployment_ids =
|
||||
get_resource_ids_for_user::<Deployment>(user).await?;
|
||||
let sync_ids =
|
||||
get_resource_ids_for_user::<ResourceSync>(user).await?;
|
||||
query.extend(doc! {
|
||||
"$or": [
|
||||
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -8,13 +6,12 @@ use komodo_client::{
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags,
|
||||
permission::{get_check_permissions, list_resource_ids_for_user},
|
||||
resource,
|
||||
state::db_client,
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -25,10 +22,10 @@ impl Resolve<ReadArgs> for GetAlerter {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(
|
||||
get_check_permissions::<Alerter>(
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
&self.alerter,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -46,13 +43,8 @@ impl Resolve<ReadArgs> for ListAlerters {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Alerter>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
resource::list_for_user::<Alerter>(self.query, user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -69,10 +61,7 @@ impl Resolve<ReadArgs> for ListFullAlerters {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Alerter>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -84,11 +73,9 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetAlertersSummaryResponse> {
|
||||
let query = match list_resource_ids_for_user::<Alerter>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
Alerter,
|
||||
>(&user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
|
||||
@@ -2,27 +2,30 @@ use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use futures_util::TryStreamExt;
|
||||
use futures::TryStreamExt;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
Operation,
|
||||
build::{Build, BuildActionState, BuildListItem, BuildState},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
update::UpdateStatus,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, build_state_cache, db_client},
|
||||
state::{
|
||||
action_states, build_state_cache, db_client, github_client,
|
||||
},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -33,10 +36,10 @@ impl Resolve<ReadArgs> for GetBuild {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(
|
||||
get_check_permissions::<Build>(
|
||||
resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -54,13 +57,8 @@ impl Resolve<ReadArgs> for ListBuilds {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Build>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
resource::list_for_user::<Build>(self.query, user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -77,10 +75,7 @@ impl Resolve<ReadArgs> for ListFullBuilds {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Build>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -92,10 +87,10 @@ impl Resolve<ReadArgs> for GetBuildActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<BuildActionState> {
|
||||
let build = get_check_permissions::<Build>(
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -116,7 +111,6 @@ impl Resolve<ReadArgs> for GetBuildsSummary {
|
||||
let builds = resource::list_full_for_user::<Build>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -224,10 +218,10 @@ impl Resolve<ReadArgs> for ListBuildVersions {
|
||||
patch,
|
||||
limit,
|
||||
} = self;
|
||||
let build = get_check_permissions::<Build>(
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -280,10 +274,7 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
let builds = resource::list_full_for_user::<Build>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
@@ -302,3 +293,81 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetBuildWebhookEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetBuildWebhookEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com"
|
||||
|| build.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: true,
|
||||
enabled: true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: true,
|
||||
enabled: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -8,13 +6,12 @@ use komodo_client::{
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags,
|
||||
permission::{get_check_permissions, list_resource_ids_for_user},
|
||||
resource,
|
||||
state::db_client,
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -25,10 +22,10 @@ impl Resolve<ReadArgs> for GetBuilder {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(
|
||||
get_check_permissions::<Builder>(
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&self.builder,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -46,13 +43,8 @@ impl Resolve<ReadArgs> for ListBuilders {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Builder>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
resource::list_for_user::<Builder>(self.query, user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -69,10 +61,7 @@ impl Resolve<ReadArgs> for ListFullBuilders {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Builder>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -84,11 +73,9 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetBuildersSummaryResponse> {
|
||||
let query = match list_resource_ids_for_user::<Builder>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
Builder,
|
||||
>(&user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::{cmp, collections::HashSet};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -8,22 +8,19 @@ use komodo_client::{
|
||||
Deployment, DeploymentActionState, DeploymentConfig,
|
||||
DeploymentListItem, DeploymentState,
|
||||
},
|
||||
docker::container::{Container, ContainerStats},
|
||||
docker::container::ContainerStats,
|
||||
permission::PermissionLevel,
|
||||
server::{Server, ServerState},
|
||||
server::Server,
|
||||
update::Log,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::{self, container::InspectContainer};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{
|
||||
action_states, deployment_status_cache, server_status_cache,
|
||||
},
|
||||
state::{action_states, deployment_status_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -34,10 +31,10 @@ impl Resolve<ReadArgs> for GetDeployment {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(
|
||||
get_check_permissions::<Deployment>(
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -54,23 +51,12 @@ impl Resolve<ReadArgs> for ListDeployments {
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
let only_update_available = self.query.specific.update_available;
|
||||
let deployments = resource::list_for_user::<Deployment>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
Ok(
|
||||
resource::list_for_user::<Deployment>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
.await?;
|
||||
let deployments = if only_update_available {
|
||||
deployments
|
||||
.into_iter()
|
||||
.filter(|deployment| deployment.info.update_available)
|
||||
.collect()
|
||||
} else {
|
||||
deployments
|
||||
};
|
||||
Ok(deployments)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,10 +72,7 @@ impl Resolve<ReadArgs> for ListFullDeployments {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Deployment>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -101,10 +84,10 @@ impl Resolve<ReadArgs> for GetDeploymentContainer {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetDeploymentContainerResponse> {
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let status = deployment_status_cache()
|
||||
@@ -135,18 +118,17 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = get_check_permissions::<Deployment>(
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(api::container::GetContainerLog {
|
||||
name,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -174,18 +156,17 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = get_check_permissions::<Deployment>(
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(api::container::GetContainerLogSearch {
|
||||
name,
|
||||
terms,
|
||||
@@ -199,51 +180,6 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectDeploymentContainer {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Container> {
|
||||
let InspectDeploymentContainer { deployment } = self;
|
||||
let Deployment {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
user,
|
||||
PermissionLevel::Read.inspect(),
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot inspect deployment, not attached to any server"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot inspect container: server is {:?}",
|
||||
cache.state
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -253,10 +189,10 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = get_check_permissions::<Deployment>(
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
@@ -265,8 +201,7 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
);
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(api::container::GetContainerStats { name })
|
||||
.await
|
||||
.context("failed to get stats from periphery")?;
|
||||
@@ -279,10 +214,10 @@ impl Resolve<ReadArgs> for GetDeploymentActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<DeploymentActionState> {
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -303,7 +238,6 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
|
||||
let deployments = resource::list_full_for_user::<Deployment>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -325,9 +259,7 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
|
||||
res.not_deployed += 1;
|
||||
}
|
||||
DeploymentState::Unknown => {
|
||||
if !deployment.template {
|
||||
res.unknown += 1;
|
||||
}
|
||||
res.unknown += 1;
|
||||
}
|
||||
_ => {
|
||||
res.unhealthy += 1;
|
||||
@@ -349,10 +281,7 @@ impl Resolve<ReadArgs> for ListCommonDeploymentExtraArgs {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
let deployments = resource::list_full_for_user::<Deployment>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
|
||||
@@ -1,57 +1,47 @@
|
||||
use std::{collections::HashSet, time::Instant};
|
||||
use std::{collections::HashSet, sync::OnceLock, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
build::Build,
|
||||
builder::{Builder, BuilderConfig},
|
||||
config::{DockerRegistry, GitProvider},
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
sync::ResourceSync,
|
||||
user::User,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
config::{core_config, core_keys},
|
||||
helpers::periphery_client,
|
||||
auth::auth_request, config::core_config, helpers::periphery_client,
|
||||
resource,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alert;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod permission;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod schedule;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod terminal;
|
||||
mod toml;
|
||||
mod update;
|
||||
mod user;
|
||||
@@ -77,7 +67,7 @@ enum ReadRequest {
|
||||
|
||||
// ==== USER ====
|
||||
GetUsername(GetUsername),
|
||||
GetPermission(GetPermission),
|
||||
GetPermissionLevel(GetPermissionLevel),
|
||||
FindUser(FindUser),
|
||||
ListUsers(ListUsers),
|
||||
ListApiKeys(ListApiKeys),
|
||||
@@ -103,56 +93,35 @@ enum ReadRequest {
|
||||
ListActions(ListActions),
|
||||
ListFullActions(ListFullActions),
|
||||
|
||||
// ==== SCHEDULE ====
|
||||
ListSchedules(ListSchedules),
|
||||
// ==== SERVER TEMPLATE ====
|
||||
GetServerTemplate(GetServerTemplate),
|
||||
GetServerTemplatesSummary(GetServerTemplatesSummary),
|
||||
ListServerTemplates(ListServerTemplates),
|
||||
ListFullServerTemplates(ListFullServerTemplates),
|
||||
|
||||
// ==== SERVER ====
|
||||
GetServersSummary(GetServersSummary),
|
||||
GetServer(GetServer),
|
||||
GetServerState(GetServerState),
|
||||
GetPeripheryInformation(GetPeripheryInformation),
|
||||
GetPeripheryVersion(GetPeripheryVersion),
|
||||
GetServerActionState(GetServerActionState),
|
||||
GetHistoricalServerStats(GetHistoricalServerStats),
|
||||
ListServers(ListServers),
|
||||
ListFullServers(ListFullServers),
|
||||
|
||||
// ==== TERMINAL ====
|
||||
ListTerminals(ListTerminals),
|
||||
|
||||
// ==== DOCKER ====
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
InspectDockerContainer(InspectDockerContainer),
|
||||
GetResourceMatchingContainer(GetResourceMatchingContainer),
|
||||
GetContainerLog(GetContainerLog),
|
||||
SearchContainerLog(SearchContainerLog),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
InspectDockerNetwork(InspectDockerNetwork),
|
||||
ListDockerImages(ListDockerImages),
|
||||
InspectDockerImage(InspectDockerImage),
|
||||
ListDockerImageHistory(ListDockerImageHistory),
|
||||
ListDockerVolumes(ListDockerVolumes),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
|
||||
// ==== SERVER STATS ====
|
||||
GetSystemInformation(GetSystemInformation),
|
||||
GetSystemStats(GetSystemStats),
|
||||
ListSystemProcesses(ListSystemProcesses),
|
||||
|
||||
// ==== STACK ====
|
||||
GetStacksSummary(GetStacksSummary),
|
||||
GetStack(GetStack),
|
||||
GetStackActionState(GetStackActionState),
|
||||
GetStackLog(GetStackLog),
|
||||
SearchStackLog(SearchStackLog),
|
||||
InspectStackContainer(InspectStackContainer),
|
||||
ListStacks(ListStacks),
|
||||
ListFullStacks(ListFullStacks),
|
||||
ListStackServices(ListStackServices),
|
||||
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
|
||||
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
ListDockerImages(ListDockerImages),
|
||||
ListDockerVolumes(ListDockerVolumes),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
GetDeploymentsSummary(GetDeploymentsSummary),
|
||||
@@ -162,7 +131,6 @@ enum ReadRequest {
|
||||
GetDeploymentStats(GetDeploymentStats),
|
||||
GetDeploymentLog(GetDeploymentLog),
|
||||
SearchDeploymentLog(SearchDeploymentLog),
|
||||
InspectDeploymentContainer(InspectDeploymentContainer),
|
||||
ListDeployments(ListDeployments),
|
||||
ListFullDeployments(ListFullDeployments),
|
||||
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
|
||||
@@ -173,6 +141,7 @@ enum ReadRequest {
|
||||
GetBuildActionState(GetBuildActionState),
|
||||
GetBuildMonthlyStats(GetBuildMonthlyStats),
|
||||
ListBuildVersions(ListBuildVersions),
|
||||
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
|
||||
ListBuilds(ListBuilds),
|
||||
ListFullBuilds(ListFullBuilds),
|
||||
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
|
||||
@@ -181,6 +150,7 @@ enum ReadRequest {
|
||||
GetReposSummary(GetReposSummary),
|
||||
GetRepo(GetRepo),
|
||||
GetRepoActionState(GetRepoActionState),
|
||||
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
|
||||
ListRepos(ListRepos),
|
||||
ListFullRepos(ListFullRepos),
|
||||
|
||||
@@ -188,9 +158,23 @@ enum ReadRequest {
|
||||
GetResourceSyncsSummary(GetResourceSyncsSummary),
|
||||
GetResourceSync(GetResourceSync),
|
||||
GetResourceSyncActionState(GetResourceSyncActionState),
|
||||
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
|
||||
ListResourceSyncs(ListResourceSyncs),
|
||||
ListFullResourceSyncs(ListFullResourceSyncs),
|
||||
|
||||
// ==== STACK ====
|
||||
GetStacksSummary(GetStacksSummary),
|
||||
GetStack(GetStack),
|
||||
GetStackActionState(GetStackActionState),
|
||||
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
|
||||
GetStackServiceLog(GetStackServiceLog),
|
||||
SearchStackServiceLog(SearchStackServiceLog),
|
||||
ListStacks(ListStacks),
|
||||
ListFullStacks(ListFullStacks),
|
||||
ListStackServices(ListStackServices),
|
||||
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
|
||||
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
|
||||
|
||||
// ==== BUILDER ====
|
||||
GetBuildersSummary(GetBuildersSummary),
|
||||
GetBuilder(GetBuilder),
|
||||
@@ -219,6 +203,11 @@ enum ReadRequest {
|
||||
ListAlerts(ListAlerts),
|
||||
GetAlert(GetAlert),
|
||||
|
||||
// ==== SERVER STATS ====
|
||||
GetSystemInformation(GetSystemInformation),
|
||||
GetSystemStats(GetSystemStats),
|
||||
ListSystemProcesses(ListSystemProcesses),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
GetVariable(GetVariable),
|
||||
ListVariables(ListVariables),
|
||||
@@ -228,30 +217,15 @@ enum ReadRequest {
|
||||
ListGitProviderAccounts(ListGitProviderAccounts),
|
||||
GetDockerRegistryAccount(GetDockerRegistryAccount),
|
||||
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
ListOnboardingKeys(ListOnboardingKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: ReadRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ReadRequest>,
|
||||
@@ -279,13 +253,11 @@ impl Resolve<ReadArgs> for GetVersion {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &ReadArgs,
|
||||
) -> serror::Result<GetCoreInfoResponse> {
|
||||
fn core_info() -> &'static GetCoreInfoResponse {
|
||||
static CORE_INFO: OnceLock<GetCoreInfoResponse> = OnceLock::new();
|
||||
CORE_INFO.get_or_init(|| {
|
||||
let config = core_config();
|
||||
let info = GetCoreInfoResponse {
|
||||
GetCoreInfoResponse {
|
||||
title: config.title.clone(),
|
||||
monitoring_interval: config.monitoring_interval,
|
||||
webhook_base_url: if config.webhook_base_url.is_empty() {
|
||||
@@ -297,12 +269,22 @@ impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
ui_write_disabled: config.ui_write_disabled,
|
||||
disable_confirm_dialog: config.disable_confirm_dialog,
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
disable_websocket_reconnect: config.disable_websocket_reconnect,
|
||||
enable_fancy_toml: config.enable_fancy_toml,
|
||||
timezone: config.timezone.clone(),
|
||||
public_key: core_keys().load().public.to_string(),
|
||||
};
|
||||
Ok(info)
|
||||
github_webhook_owners: config
|
||||
.github_webhook_app
|
||||
.installations
|
||||
.iter()
|
||||
.map(|i| i.namespace.to_string())
|
||||
.collect(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &ReadArgs,
|
||||
) -> serror::Result<GetCoreInfoResponse> {
|
||||
Ok(core_info().clone())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -333,13 +315,12 @@ impl Resolve<ReadArgs> for ListSecrets {
|
||||
_ => {
|
||||
return Err(
|
||||
anyhow!("target must be `Server` or `Builder`").into(),
|
||||
);
|
||||
)
|
||||
}
|
||||
};
|
||||
if let Some(id) = server_id {
|
||||
let server = resource::get::<Server>(&id).await?;
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
let more = periphery_client(&server)?
|
||||
.request(periphery_client::api::ListSecrets {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -392,7 +373,7 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
|
||||
_ => {
|
||||
return Err(
|
||||
anyhow!("target must be `Server` or `Builder`").into(),
|
||||
);
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -400,20 +381,17 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
|
||||
let (builds, repos, syncs) = tokio::try_join!(
|
||||
resource::list_full_for_user::<Build>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
&[]
|
||||
),
|
||||
resource::list_full_for_user::<Repo>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
&[]
|
||||
),
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
&[]
|
||||
),
|
||||
)?;
|
||||
@@ -495,7 +473,7 @@ impl Resolve<ReadArgs> for ListDockerRegistriesFromConfig {
|
||||
_ => {
|
||||
return Err(
|
||||
anyhow!("target must be `Server` or `Builder`").into(),
|
||||
);
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -511,8 +489,7 @@ async fn merge_git_providers_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
let more = periphery_client(&server)?
|
||||
.request(periphery_client::api::ListGitProviders {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -550,8 +527,7 @@ async fn merge_docker_registries_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
let more = periphery_client(&server)?
|
||||
.request(periphery_client::api::ListDockerRegistries {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::api::read::{
|
||||
ListOnboardingKeys, ListOnboardingKeysResponse,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::read::ReadArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ReadArgs> for ListOnboardingKeys {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user: admin }: &ReadArgs,
|
||||
) -> serror::Result<ListOnboardingKeysResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut keys =
|
||||
find_collect(&db_client().onboarding_keys, None, None)
|
||||
.await
|
||||
.context(
|
||||
"Failed to query database for Server onboarding keys",
|
||||
)?;
|
||||
|
||||
// No expiry keys first, followed
|
||||
keys.sort_by(|a, b| {
|
||||
if a.expires == b.expires {
|
||||
Ordering::Equal
|
||||
} else if a.expires == 0 {
|
||||
Ordering::Less
|
||||
} else if b.expires == 0 {
|
||||
Ordering::Greater
|
||||
} else {
|
||||
// Descending
|
||||
b.expires.cmp(&a.expires)
|
||||
}
|
||||
});
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,13 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetPermission, GetPermissionResponse, ListPermissions,
|
||||
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
|
||||
ListPermissionsResponse, ListUserTargetPermissions,
|
||||
ListUserTargetPermissionsResponse,
|
||||
},
|
||||
entities::permission::PermissionLevel,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -35,13 +35,13 @@ impl Resolve<ReadArgs> for ListPermissions {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPermission {
|
||||
impl Resolve<ReadArgs> for GetPermissionLevel {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPermissionResponse> {
|
||||
) -> serror::Result<GetPermissionLevelResponse> {
|
||||
if user.admin {
|
||||
return Ok(PermissionLevel::Write.all());
|
||||
return Ok(PermissionLevel::Write);
|
||||
}
|
||||
Ok(get_user_permission_on_target(user, &self.target).await?)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, procedure_state_cache},
|
||||
};
|
||||
@@ -23,10 +22,10 @@ impl Resolve<ReadArgs> for GetProcedure {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetProcedureResponse> {
|
||||
Ok(
|
||||
get_check_permissions::<Procedure>(
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
&self.procedure,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -45,10 +44,7 @@ impl Resolve<ReadArgs> for ListProcedures {
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Procedure>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -67,10 +63,7 @@ impl Resolve<ReadArgs> for ListFullProcedures {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Procedure>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -85,7 +78,6 @@ impl Resolve<ReadArgs> for GetProceduresSummary {
|
||||
let procedures = resource::list_full_for_user::<Procedure>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -128,10 +120,10 @@ impl Resolve<ReadArgs> for GetProcedureActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetProcedureActionStateResponse> {
|
||||
let procedure = get_check_permissions::<Procedure>(
|
||||
let procedure = resource::get_check_permissions::<Procedure>(
|
||||
&self.procedure,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::{Document, doc};
|
||||
use database::mungos::{
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::api::read::*;
|
||||
use mongo_indexed::{doc, Document};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id, find::find_collect,
|
||||
mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -2,6 +2,7 @@ use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::{Repo, RepoActionState, RepoListItem, RepoState},
|
||||
},
|
||||
@@ -9,10 +10,10 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, repo_state_cache},
|
||||
state::{action_states, github_client, repo_state_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -23,10 +24,10 @@ impl Resolve<ReadArgs> for GetRepo {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(
|
||||
get_check_permissions::<Repo>(
|
||||
resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -44,13 +45,8 @@ impl Resolve<ReadArgs> for ListRepos {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Repo>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
resource::list_for_user::<Repo>(self.query, &user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -67,10 +63,7 @@ impl Resolve<ReadArgs> for ListFullRepos {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Repo>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -82,10 +75,10 @@ impl Resolve<ReadArgs> for GetRepoActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<RepoActionState> {
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -106,7 +99,6 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
let repos = resource::list_full_for_user::<Repo>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -140,11 +132,7 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => {
|
||||
if !repo.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _)
|
||||
| (RepoState::Pulling, _)
|
||||
@@ -157,3 +145,104 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetRepoWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetRepoWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com"
|
||||
|| repo.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let clone_url =
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id);
|
||||
let pull_url =
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id);
|
||||
let build_url =
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id);
|
||||
|
||||
let mut clone_enabled = false;
|
||||
let mut pull_enabled = false;
|
||||
let mut build_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if !webhook.active {
|
||||
continue;
|
||||
}
|
||||
if webhook.config.url == clone_url {
|
||||
clone_enabled = true
|
||||
}
|
||||
if webhook.config.url == pull_url {
|
||||
pull_enabled = true
|
||||
}
|
||||
if webhook.config.url == build_url {
|
||||
build_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
clone_enabled,
|
||||
pull_enabled,
|
||||
build_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
use futures_util::future::join_all;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
action::Action,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
resource::{ResourceQuery, TemplatesQueryBehavior},
|
||||
schedule::Schedule,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::{get_all_tags, get_last_run_at},
|
||||
resource::list_full_for_user,
|
||||
schedule::get_schedule_item_info,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
impl Resolve<ReadArgs> for ListSchedules {
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &ReadArgs,
|
||||
) -> serror::Result<Vec<Schedule>> {
|
||||
let all_tags = get_all_tags(None).await?;
|
||||
let (actions, procedures) = tokio::try_join!(
|
||||
list_full_for_user::<Action>(
|
||||
ResourceQuery {
|
||||
names: Default::default(),
|
||||
templates: TemplatesQueryBehavior::Include,
|
||||
tag_behavior: self.tag_behavior,
|
||||
tags: self.tags.clone(),
|
||||
specific: Default::default(),
|
||||
},
|
||||
&args.user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
),
|
||||
list_full_for_user::<Procedure>(
|
||||
ResourceQuery {
|
||||
names: Default::default(),
|
||||
templates: TemplatesQueryBehavior::Include,
|
||||
tag_behavior: self.tag_behavior,
|
||||
tags: self.tags.clone(),
|
||||
specific: Default::default(),
|
||||
},
|
||||
&args.user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
)?;
|
||||
let actions = actions.into_iter().map(async |action| {
|
||||
let (next_scheduled_run, schedule_error) =
|
||||
get_schedule_item_info(&ResourceTarget::Action(
|
||||
action.id.clone(),
|
||||
));
|
||||
let last_run_at =
|
||||
get_last_run_at::<Action>(&action.id).await.unwrap_or(None);
|
||||
Schedule {
|
||||
target: ResourceTarget::Action(action.id),
|
||||
name: action.name,
|
||||
enabled: action.config.schedule_enabled,
|
||||
schedule_format: action.config.schedule_format,
|
||||
schedule: action.config.schedule,
|
||||
schedule_timezone: action.config.schedule_timezone,
|
||||
tags: action.tags,
|
||||
last_run_at,
|
||||
next_scheduled_run,
|
||||
schedule_error,
|
||||
}
|
||||
});
|
||||
let procedures = procedures.into_iter().map(async |procedure| {
|
||||
let (next_scheduled_run, schedule_error) =
|
||||
get_schedule_item_info(&ResourceTarget::Procedure(
|
||||
procedure.id.clone(),
|
||||
));
|
||||
let last_run_at = get_last_run_at::<Procedure>(&procedure.id)
|
||||
.await
|
||||
.unwrap_or(None);
|
||||
Schedule {
|
||||
target: ResourceTarget::Procedure(procedure.id),
|
||||
name: procedure.name,
|
||||
enabled: procedure.config.schedule_enabled,
|
||||
schedule_format: procedure.config.schedule_format,
|
||||
schedule: procedure.config.schedule,
|
||||
schedule_timezone: procedure.config.schedule_timezone,
|
||||
tags: procedure.tags,
|
||||
last_run_at,
|
||||
next_scheduled_run,
|
||||
schedule_error,
|
||||
}
|
||||
});
|
||||
let (actions, procedures) =
|
||||
tokio::join!(join_all(actions), join_all(procedures));
|
||||
|
||||
Ok(
|
||||
actions
|
||||
.into_iter()
|
||||
.chain(procedures)
|
||||
.filter(|s| !s.schedule.is_empty())
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -4,52 +4,46 @@ use std::{
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::{
|
||||
FIFTEEN_SECONDS_MS, get_timelength_in_ms, unix_timestamp_ms,
|
||||
};
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
deployment::Deployment,
|
||||
docker::{
|
||||
container::{
|
||||
Container, ContainerListItem, ContainerStateStatusEnum,
|
||||
},
|
||||
container::{Container, ContainerListItem},
|
||||
image::{Image, ImageHistoryResponseItem},
|
||||
network::Network,
|
||||
volume::Volume,
|
||||
},
|
||||
permission::PermissionLevel,
|
||||
server::{
|
||||
Server, ServerActionState, ServerListItem, ServerQuery,
|
||||
ServerState,
|
||||
Server, ServerActionState, ServerListItem, ServerState,
|
||||
},
|
||||
stack::{Stack, StackServiceNames},
|
||||
stats::{SystemInformation, SystemProcess},
|
||||
update::Log,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use periphery_client::api::{
|
||||
self as periphery,
|
||||
container::InspectContainer,
|
||||
docker::{
|
||||
ImageHistory, InspectImage, InspectNetwork, InspectVolume,
|
||||
},
|
||||
image::{ImageHistory, InspectImage},
|
||||
network::InspectNetwork,
|
||||
volume::InspectVolume,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::{get_check_permissions, list_resources_for_user},
|
||||
resource,
|
||||
stack::compose_container_match_regex,
|
||||
state::{action_states, db_client, server_status_cache},
|
||||
@@ -65,33 +59,21 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
let servers = resource::list_for_user::<Server>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let core_version = env!("CARGO_PKG_VERSION");
|
||||
let mut res = GetServersSummaryResponse::default();
|
||||
|
||||
for server in servers {
|
||||
res.total += 1;
|
||||
match server.info.state {
|
||||
ServerState::Ok => {
|
||||
// Check for version mismatch
|
||||
if matches!(&server.info.version, Some(version) if version != core_version)
|
||||
{
|
||||
res.warning += 1;
|
||||
} else {
|
||||
res.healthy += 1;
|
||||
}
|
||||
res.healthy += 1;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
ServerState::Disabled => {
|
||||
if !server.template {
|
||||
res.disabled += 1;
|
||||
}
|
||||
res.disabled += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -99,16 +81,36 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryVersion {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryVersionResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let version = server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.version.clone())
|
||||
.unwrap_or(String::from("unknown"));
|
||||
Ok(GetPeripheryVersionResponse { version })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServer {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(
|
||||
get_check_permissions::<Server>(
|
||||
resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -126,13 +128,8 @@ impl Resolve<ReadArgs> for ListServers {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Server>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
resource::list_for_user::<Server>(self.query, &user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -149,10 +146,7 @@ impl Resolve<ReadArgs> for ListFullServers {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Server>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -164,10 +158,10 @@ impl Resolve<ReadArgs> for GetServerState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerStateResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let status = server_status_cache()
|
||||
@@ -186,10 +180,10 @@ impl Resolve<ReadArgs> for GetServerActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ServerActionState> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -202,27 +196,14 @@ impl Resolve<ReadArgs> for GetServerActionState {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryInformation {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryInformationResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.periphery_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing Periphery Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
// This protects the peripheries from spam requests
|
||||
const SYSTEM_INFO_EXPIRY: u128 = FIFTEEN_SECONDS_MS;
|
||||
type SystemInfoCache =
|
||||
Mutex<HashMap<String, Arc<(SystemInformation, u128)>>>;
|
||||
fn system_info_cache() -> &'static SystemInfoCache {
|
||||
static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
|
||||
OnceLock::new();
|
||||
SYSTEM_INFO_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
@@ -230,22 +211,31 @@ impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<SystemInformation> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing system Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.await?;
|
||||
|
||||
let mut lock = system_info_cache().lock().await;
|
||||
let res = match lock.get(&server.id) {
|
||||
Some(cached) if cached.1 > unix_timestamp_ms() => {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
.request(periphery::stats::GetSystemInformation {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
server.id,
|
||||
(stats.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
|
||||
.into(),
|
||||
);
|
||||
stats
|
||||
}
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -254,21 +244,21 @@ impl Resolve<ReadArgs> for GetSystemStats {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSystemStatsResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_stats
|
||||
let status =
|
||||
server_status_cache().get(&server.id).await.with_context(
|
||||
|| format!("did not find status for server at {}", server.id),
|
||||
)?;
|
||||
let stats = status
|
||||
.stats
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing system stats. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.context("server stats not available")?;
|
||||
Ok(stats.clone())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -286,10 +276,10 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSystemProcessesResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.processes(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let mut lock = processes_cache().lock().await;
|
||||
@@ -298,8 +288,7 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)
|
||||
.await?
|
||||
let stats = periphery_client(&server)?
|
||||
.request(periphery::stats::GetSystemProcesses {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
@@ -326,10 +315,10 @@ impl Resolve<ReadArgs> for GetHistoricalServerStats {
|
||||
granularity,
|
||||
page,
|
||||
} = self;
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let granularity =
|
||||
@@ -374,10 +363,10 @@ impl Resolve<ReadArgs> for ListDockerContainers {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerContainersResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -397,12 +386,17 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListAllDockerContainersResponse> {
|
||||
let servers = resource::list_for_user::<Server>(
|
||||
ServerQuery::builder().names(self.servers.clone()).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
Default::default(),
|
||||
&user,
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|server| {
|
||||
self.servers.is_empty()
|
||||
|| self.servers.contains(&server.id)
|
||||
|| self.servers.contains(&server.name)
|
||||
});
|
||||
|
||||
let mut containers = Vec::<ContainerListItem>::new();
|
||||
|
||||
@@ -410,60 +404,12 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
let Some(more) = &cache.containers else {
|
||||
continue;
|
||||
};
|
||||
let more = more
|
||||
.iter()
|
||||
.filter(|container| {
|
||||
self.containers.is_empty()
|
||||
|| self.containers.contains(&container.name)
|
||||
})
|
||||
.cloned();
|
||||
containers.extend(more);
|
||||
}
|
||||
|
||||
Ok(containers)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetDockerContainersSummary {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetDockerContainersSummaryResponse> {
|
||||
let servers = resource::list_full_for_user::<Server>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.context("failed to get servers from db")?;
|
||||
|
||||
let mut res = GetDockerContainersSummaryResponse::default();
|
||||
|
||||
for server in servers {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
|
||||
if let Some(containers) = &cache.containers {
|
||||
for container in containers {
|
||||
res.total += 1;
|
||||
match container.state {
|
||||
ContainerStateStatusEnum::Created
|
||||
| ContainerStateStatusEnum::Paused
|
||||
| ContainerStateStatusEnum::Exited => res.stopped += 1,
|
||||
ContainerStateStatusEnum::Running => res.running += 1,
|
||||
ContainerStateStatusEnum::Empty => res.unknown += 1,
|
||||
_ => res.unhealthy += 1,
|
||||
}
|
||||
}
|
||||
if let Some(more_containers) = &cache.containers {
|
||||
containers.extend(more_containers.clone());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
Ok(containers)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -472,10 +418,10 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Container> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.inspect(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -490,8 +436,7 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(InspectContainer {
|
||||
name: self.container,
|
||||
})
|
||||
@@ -513,14 +458,13 @@ impl Resolve<ReadArgs> for GetContainerLog {
|
||||
tail,
|
||||
timestamps,
|
||||
} = self;
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(periphery::container::GetContainerLog {
|
||||
name: container,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -545,14 +489,13 @@ impl Resolve<ReadArgs> for SearchContainerLog {
|
||||
invert,
|
||||
timestamps,
|
||||
} = self;
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(periphery::container::GetContainerLogSearch {
|
||||
name: container,
|
||||
terms,
|
||||
@@ -571,10 +514,10 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetResourceMatchingContainerResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// first check deployments
|
||||
@@ -587,12 +530,12 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
|
||||
}
|
||||
|
||||
// then check stacks
|
||||
let stacks = list_resources_for_user::<Stack>(
|
||||
doc! { "config.server_id": &server.id },
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let stacks =
|
||||
resource::list_full_for_user_using_document::<Stack>(
|
||||
doc! { "config.server_id": &server.id },
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// check matching stack
|
||||
for stack in stacks {
|
||||
@@ -632,10 +575,10 @@ impl Resolve<ReadArgs> for ListDockerNetworks {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerNetworksResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -654,10 +597,10 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Network> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -672,8 +615,7 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(InspectNetwork { name: self.network })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -685,10 +627,10 @@ impl Resolve<ReadArgs> for ListDockerImages {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerImagesResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -707,10 +649,10 @@ impl Resolve<ReadArgs> for InspectDockerImage {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Image> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -722,8 +664,7 @@ impl Resolve<ReadArgs> for InspectDockerImage {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(InspectImage { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -735,10 +676,10 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Vec<ImageHistoryResponseItem>> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -753,8 +694,7 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(ImageHistory { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -766,10 +706,10 @@ impl Resolve<ReadArgs> for ListDockerVolumes {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerVolumesResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -788,10 +728,10 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Volume> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -803,8 +743,7 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
let res = periphery_client(&server)?
|
||||
.request(InspectVolume { name: self.volume })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -816,10 +755,10 @@ impl Resolve<ReadArgs> for ListComposeProjects {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListComposeProjectsResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -832,47 +771,3 @@ impl Resolve<ReadArgs> for ListComposeProjects {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// impl Resolve<ReadArgs> for ListAllTerminals {
|
||||
// async fn resolve(
|
||||
// self,
|
||||
// args: &ReadArgs,
|
||||
// ) -> Result<Self::Response, Self::Error> {
|
||||
// // match self.tar
|
||||
// let mut terminals = resource::list_full_for_user::<Server>(
|
||||
// self.query, &args.user, &all_tags,
|
||||
// )
|
||||
// .await?
|
||||
// .into_iter()
|
||||
// .map(|server| async move {
|
||||
// (
|
||||
// list_terminals_inner(&server, self.fresh).await,
|
||||
// (server.id, server.name),
|
||||
// )
|
||||
// })
|
||||
// .collect::<FuturesUnordered<_>>()
|
||||
// .collect::<Vec<_>>()
|
||||
// .await
|
||||
// .into_iter()
|
||||
// .flat_map(|(terminals, server)| {
|
||||
// let terminals = terminals.ok()?;
|
||||
// Some((terminals, server))
|
||||
// })
|
||||
// .flat_map(|(terminals, (server_id, server_name))| {
|
||||
// terminals.into_iter().map(move |info| {
|
||||
// TerminalInfoWithServer::from_terminal_info(
|
||||
// &server_id,
|
||||
// &server_name,
|
||||
// info,
|
||||
// )
|
||||
// })
|
||||
// })
|
||||
// .collect::<Vec<_>>();
|
||||
|
||||
// terminals.sort_by(|a, b| {
|
||||
// a.server_name.cmp(&b.server_name).then(a.name.cmp(&b.name))
|
||||
// });
|
||||
|
||||
// Ok(terminals)
|
||||
// }
|
||||
// }
|
||||
|
||||
97
bin/core/src/api/read/server_template.rs
Normal file
97
bin/core/src/api/read/server_template.rs
Normal file
@@ -0,0 +1,97 @@
|
||||
use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
impl Resolve<ReadArgs> for GetServerTemplate {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerTemplateResponse> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.server_template,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListServerTemplates {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListServerTemplatesResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListFullServerTemplates {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListFullServerTemplatesResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<ServerTemplate>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServerTemplatesSummary {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerTemplatesSummaryResponse> {
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
ServerTemplate,
|
||||
>(&user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let total = db_client()
|
||||
.server_templates
|
||||
.count_documents(query)
|
||||
.await
|
||||
.context("failed to count all server template documents")?;
|
||||
let res = GetServerTemplatesSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
@@ -1,27 +1,25 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
docker::container::Container,
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
server::{Server, ServerState},
|
||||
stack::{Stack, StackActionState, StackListItem, StackState},
|
||||
},
|
||||
};
|
||||
use periphery_client::api::{
|
||||
compose::{GetComposeLog, GetComposeLogSearch},
|
||||
container::InspectContainer,
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeServiceLog, GetComposeServiceLogSearch,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::get_stack_and_server,
|
||||
state::{action_states, server_status_cache, stack_status_cache},
|
||||
state::{action_states, github_client, stack_status_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -32,10 +30,10 @@ impl Resolve<ReadArgs> for GetStack {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(
|
||||
get_check_permissions::<Stack>(
|
||||
resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -47,10 +45,10 @@ impl Resolve<ReadArgs> for ListStackServices {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListStackServicesResponse> {
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -66,125 +64,60 @@ impl Resolve<ReadArgs> for ListStackServices {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetStackLog {
|
||||
impl Resolve<ReadArgs> for GetStackServiceLog {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetStackLogResponse> {
|
||||
let GetStackLog {
|
||||
) -> serror::Result<GetStackServiceLogResponse> {
|
||||
let GetStackServiceLog {
|
||||
stack,
|
||||
services,
|
||||
service,
|
||||
tail,
|
||||
timestamps,
|
||||
} = self;
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLog {
|
||||
let (stack, server) =
|
||||
get_stack_and_server(&stack, user, PermissionLevel::Read, true)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
.request(GetComposeServiceLog {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
service,
|
||||
tail,
|
||||
timestamps,
|
||||
})
|
||||
.await
|
||||
.context("Failed to get stack log from periphery")?;
|
||||
.context("failed to get stack service log from periphery")?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for SearchStackLog {
|
||||
impl Resolve<ReadArgs> for SearchStackServiceLog {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<SearchStackLogResponse> {
|
||||
let SearchStackLog {
|
||||
) -> serror::Result<SearchStackServiceLogResponse> {
|
||||
let SearchStackServiceLog {
|
||||
stack,
|
||||
services,
|
||||
service,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
timestamps,
|
||||
} = self;
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLogSearch {
|
||||
let (stack, server) =
|
||||
get_stack_and_server(&stack, user, PermissionLevel::Read, true)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
.request(GetComposeServiceLogSearch {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
service,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
timestamps,
|
||||
})
|
||||
.await
|
||||
.context("Failed to search stack log from periphery")?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectStackContainer {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Container> {
|
||||
let InspectStackContainer { stack, service } = self;
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Read.inspect(),
|
||||
)
|
||||
.await?;
|
||||
if stack.config.server_id.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Cannot inspect stack, not attached to any server")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let server =
|
||||
resource::get::<Server>(&stack.config.server_id).await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot inspect container: server is {:?}",
|
||||
cache.state
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let services = &stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.curr
|
||||
.services;
|
||||
let Some(name) = services
|
||||
.iter()
|
||||
.find(|s| s.service == service)
|
||||
.and_then(|s| s.container.as_ref().map(|c| c.name.clone()))
|
||||
else {
|
||||
return Err(anyhow!(
|
||||
"No service found matching '{service}'. Was the stack last deployed manually?"
|
||||
).into());
|
||||
};
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
.context("failed to get stack service log from periphery")?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
@@ -200,10 +133,7 @@ impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
let stacks = resource::list_full_for_user::<Stack>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
@@ -234,10 +164,7 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
let stacks = resource::list_full_for_user::<Stack>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
@@ -267,29 +194,10 @@ impl Resolve<ReadArgs> for ListStacks {
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
let only_update_available = self.query.specific.update_available;
|
||||
let stacks = resource::list_for_user::<Stack>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
Ok(
|
||||
resource::list_for_user::<Stack>(self.query, user, &all_tags)
|
||||
.await?,
|
||||
)
|
||||
.await?;
|
||||
let stacks = if only_update_available {
|
||||
stacks
|
||||
.into_iter()
|
||||
.filter(|stack| {
|
||||
stack
|
||||
.info
|
||||
.services
|
||||
.iter()
|
||||
.any(|service| service.update_available)
|
||||
})
|
||||
.collect()
|
||||
} else {
|
||||
stacks
|
||||
};
|
||||
Ok(stacks)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -305,10 +213,7 @@ impl Resolve<ReadArgs> for ListFullStacks {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Stack>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -320,10 +225,10 @@ impl Resolve<ReadArgs> for GetStackActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<StackActionState> {
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -344,7 +249,6 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
let stacks = resource::list_full_for_user::<Stack>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -361,11 +265,7 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
StackState::Running => res.running += 1,
|
||||
StackState::Stopped | StackState::Paused => res.stopped += 1,
|
||||
StackState::Down => res.down += 1,
|
||||
StackState::Unknown => {
|
||||
if !stack.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
StackState::Unknown => res.unknown += 1,
|
||||
_ => res.unhealthy += 1,
|
||||
}
|
||||
}
|
||||
@@ -373,3 +273,91 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetStackWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetStackWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com"
|
||||
|| stack.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id);
|
||||
let deploy_url =
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut deploy_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == deploy_url {
|
||||
deploy_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
deploy_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,17 +2,21 @@ use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
sync::{
|
||||
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
|
||||
ResourceSyncState,
|
||||
},
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::action_states,
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
resource,
|
||||
state::{action_states, github_client, resource_sync_state_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -23,10 +27,10 @@ impl Resolve<ReadArgs> for GetResourceSync {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ResourceSync> {
|
||||
Ok(
|
||||
get_check_permissions::<ResourceSync>(
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -45,10 +49,7 @@ impl Resolve<ReadArgs> for ListResourceSyncs {
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<ResourceSync>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -67,10 +68,7 @@ impl Resolve<ReadArgs> for ListFullResourceSyncs {
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
self.query, &user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -82,14 +80,14 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ResourceSyncActionState> {
|
||||
let sync = get_check_permissions::<ResourceSync>(
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.sync
|
||||
.resource_sync
|
||||
.get(&sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -107,7 +105,6 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
@@ -115,6 +112,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
|
||||
let mut res = GetResourceSyncsSummaryResponse::default();
|
||||
|
||||
let cache = resource_sync_state_cache();
|
||||
let action_states = action_states();
|
||||
|
||||
for resource_sync in resource_syncs {
|
||||
@@ -133,20 +131,120 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
res.failed += 1;
|
||||
continue;
|
||||
}
|
||||
if action_states
|
||||
.sync
|
||||
.get(&resource_sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?
|
||||
.syncing
|
||||
{
|
||||
res.syncing += 1;
|
||||
continue;
|
||||
|
||||
match (
|
||||
cache.get(&resource_sync.id).await.unwrap_or_default(),
|
||||
action_states
|
||||
.resource_sync
|
||||
.get(&resource_sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.syncing => {
|
||||
res.syncing += 1;
|
||||
}
|
||||
(ResourceSyncState::Ok, _) => res.ok += 1,
|
||||
(ResourceSyncState::Failed, _) => res.failed += 1,
|
||||
(ResourceSyncState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(ResourceSyncState::Syncing, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
(ResourceSyncState::Pending, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
res.ok += 1;
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSyncWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSyncWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.git_provider != "github.com"
|
||||
|| sync.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = sync.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id);
|
||||
let sync_url =
|
||||
format!("{host}/listener/github/sync/{}/sync", sync.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut sync_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == sync_url {
|
||||
sync_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
sync_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
find::find_collect, mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{GetTag, ListTags},
|
||||
entities::tag::Tag,
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_tag, state::db_client};
|
||||
|
||||
@@ -1,247 +0,0 @@
|
||||
use anyhow::Context as _;
|
||||
use futures_util::{
|
||||
FutureExt, StreamExt as _, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{ListTerminals, ListTerminalsResponse},
|
||||
entities::{
|
||||
deployment::Deployment,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
stack::Stack,
|
||||
terminal::{Terminal, TerminalTarget},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
helpers::periphery_client, permission::get_check_permissions,
|
||||
resource,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ReadArgs> for ListTerminals {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListTerminalsResponse> {
|
||||
let Some(target) = self.target else {
|
||||
return list_all_terminals_for_user(user, self.use_names).await;
|
||||
};
|
||||
match &target {
|
||||
TerminalTarget::Server { server } => {
|
||||
let server = server
|
||||
.as_ref()
|
||||
.context("Must provide 'target.params.server'")
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
let server = get_check_permissions::<Server>(
|
||||
server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
TerminalTarget::Container { server, .. } => {
|
||||
let server = get_check_permissions::<Server>(
|
||||
server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
TerminalTarget::Stack { stack, .. } => {
|
||||
let server = get_check_permissions::<Stack>(
|
||||
stack,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
.config
|
||||
.server_id;
|
||||
let server = resource::get::<Server>(&server).await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
TerminalTarget::Deployment { deployment } => {
|
||||
let server = get_check_permissions::<Deployment>(
|
||||
deployment,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
.config
|
||||
.server_id;
|
||||
let server = resource::get::<Server>(&server).await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_all_terminals_for_user(
|
||||
user: &User,
|
||||
use_names: bool,
|
||||
) -> serror::Result<Vec<Terminal>> {
|
||||
let (mut servers, stacks, deployments) = tokio::try_join!(
|
||||
resource::list_full_for_user::<Server>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&[]
|
||||
)
|
||||
.map(|res| res.map(|servers| servers
|
||||
.into_iter()
|
||||
// true denotes user actually has permission on this Server.
|
||||
.map(|server| (server, true))
|
||||
.collect::<Vec<_>>())),
|
||||
resource::list_full_for_user::<Stack>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&[]
|
||||
),
|
||||
resource::list_full_for_user::<Deployment>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&[]
|
||||
),
|
||||
)?;
|
||||
|
||||
// Ensure any missing servers are present to query
|
||||
for stack in &stacks {
|
||||
if !stack.config.server_id.is_empty()
|
||||
&& !servers
|
||||
.iter()
|
||||
.any(|(server, _)| server.id == stack.config.server_id)
|
||||
{
|
||||
let server =
|
||||
resource::get::<Server>(&stack.config.server_id).await?;
|
||||
servers.push((server, false));
|
||||
}
|
||||
}
|
||||
for deployment in &deployments {
|
||||
if !deployment.config.server_id.is_empty()
|
||||
&& !servers
|
||||
.iter()
|
||||
.any(|(server, _)| server.id == deployment.config.server_id)
|
||||
{
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
servers.push((server, false));
|
||||
}
|
||||
}
|
||||
|
||||
let mut terminals = servers
|
||||
.into_iter()
|
||||
.map(|(server, server_permission)| async move {
|
||||
(
|
||||
list_terminals_on_server(&server, None).await,
|
||||
(server.id, server.name, server_permission),
|
||||
)
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.flat_map(
|
||||
|(terminals, (server_id, server_name, server_permission))| {
|
||||
let terminals = terminals
|
||||
.ok()?
|
||||
.into_iter()
|
||||
.filter_map(|mut terminal| {
|
||||
// Only keep terminals with appropriate perms.
|
||||
match terminal.target.clone() {
|
||||
TerminalTarget::Server { .. } => server_permission
|
||||
.then(|| {
|
||||
terminal.target = TerminalTarget::Server {
|
||||
server: Some(if use_names {
|
||||
server_name.clone()
|
||||
} else {
|
||||
server_id.clone()
|
||||
}),
|
||||
};
|
||||
terminal
|
||||
}),
|
||||
TerminalTarget::Container { container, .. } => {
|
||||
server_permission.then(|| {
|
||||
terminal.target = TerminalTarget::Container {
|
||||
server: if use_names {
|
||||
server_name.clone()
|
||||
} else {
|
||||
server_id.clone()
|
||||
},
|
||||
container,
|
||||
};
|
||||
terminal
|
||||
})
|
||||
}
|
||||
TerminalTarget::Stack { stack, service } => {
|
||||
stacks.iter().find(|s| s.id == stack).map(|s| {
|
||||
terminal.target = TerminalTarget::Stack {
|
||||
stack: if use_names {
|
||||
s.name.clone()
|
||||
} else {
|
||||
s.id.clone()
|
||||
},
|
||||
service,
|
||||
};
|
||||
terminal
|
||||
})
|
||||
}
|
||||
TerminalTarget::Deployment { deployment } => {
|
||||
deployments.iter().find(|d| d.id == deployment).map(
|
||||
|d| {
|
||||
terminal.target = TerminalTarget::Deployment {
|
||||
deployment: if use_names {
|
||||
d.name.clone()
|
||||
} else {
|
||||
d.id.clone()
|
||||
},
|
||||
};
|
||||
terminal
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Some(terminals)
|
||||
},
|
||||
)
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
terminals.sort_by(|a, b| {
|
||||
a.target.cmp(&b.target).then(a.name.cmp(&b.name))
|
||||
});
|
||||
|
||||
Ok(terminals)
|
||||
}
|
||||
|
||||
async fn list_terminals_on_server(
|
||||
server: &Server,
|
||||
target: Option<TerminalTarget>,
|
||||
) -> serror::Result<Vec<Terminal>> {
|
||||
periphery_client(server)
|
||||
.await?
|
||||
.request(periphery_client::api::terminal::ListTerminals {
|
||||
target,
|
||||
})
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get Terminal list from Server {} ({})",
|
||||
server.name, server.id
|
||||
)
|
||||
})
|
||||
.map_err(Into::into)
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
|
||||
@@ -7,185 +6,173 @@ use komodo_client::{
|
||||
ListUserGroups,
|
||||
},
|
||||
entities::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment,
|
||||
permission::PermissionLevel, procedure::Procedure, repo::Repo,
|
||||
resource::ResourceQuery, server::Server, stack::Stack,
|
||||
sync::ResourceSync, toml::ResourcesToml, user::User,
|
||||
action::Action, alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
procedure::Procedure, repo::Repo, resource::ResourceQuery,
|
||||
server::Server, server_template::ServerTemplate, stack::Stack,
|
||||
sync::ResourceSync, toml::ResourcesToml, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::{
|
||||
get_all_tags, get_id_to_tags, get_user_user_group_ids,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::db_client,
|
||||
sync::{
|
||||
toml::{ToToml, convert_resource},
|
||||
user_groups::{convert_user_groups, user_group_to_toml},
|
||||
variables::variable_to_toml,
|
||||
toml::{convert_resource, ToToml, TOML_PRETTY_OPTIONS},
|
||||
user_groups::convert_user_groups,
|
||||
AllResourcesById,
|
||||
},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
async fn get_all_targets(
|
||||
tags: &[String],
|
||||
user: &User,
|
||||
) -> anyhow::Result<Vec<ResourceTarget>> {
|
||||
let mut targets = Vec::<ResourceTarget>::new();
|
||||
let all_tags = if tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Alerter>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Alerter(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Builder>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Builder(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Server>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Server(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Stack>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Stack(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Deployment>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Deployment(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Build>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Build(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Repo>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Repo(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Procedure>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Procedure(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<Action>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Action(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
// These will already be filtered by [ExportResourcesToToml]
|
||||
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
|
||||
);
|
||||
Ok(targets)
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ExportAllResourcesToToml {
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &ReadArgs,
|
||||
) -> serror::Result<ExportAllResourcesToTomlResponse> {
|
||||
let targets = if self.include_resources {
|
||||
get_all_targets(&self.tags, &args.user).await?
|
||||
let mut targets = Vec::<ResourceTarget>::new();
|
||||
|
||||
let all_tags = if self.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
Vec::new()
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
|
||||
let user_groups = if self.include_user_groups {
|
||||
if args.user.admin {
|
||||
find_collect(&db_client().user_groups, None, None)
|
||||
.await
|
||||
.context("failed to query db for user groups")?
|
||||
.into_iter()
|
||||
.map(|user_group| user_group.id)
|
||||
.collect()
|
||||
} else {
|
||||
get_user_user_group_ids(&args.user.id).await?
|
||||
}
|
||||
let ReadArgs { user } = args;
|
||||
|
||||
targets.extend(
|
||||
resource::list_for_user::<Alerter>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Alerter(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Builder>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Builder(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Server>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Server(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Stack>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Stack(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Deployment>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Deployment(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Build>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Build(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Repo>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Repo(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Procedure>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Procedure(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Action>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Action(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
ResourceQuery::builder().tags(self.tags.clone()).build(),
|
||||
&user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
// These will already be filtered by [ExportResourcesToToml]
|
||||
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
|
||||
);
|
||||
|
||||
let user_groups = if user.admin && self.tags.is_empty() {
|
||||
find_collect(&db_client().user_groups, None, None)
|
||||
.await
|
||||
.context("failed to query db for user groups")?
|
||||
.into_iter()
|
||||
.map(|user_group| user_group.id)
|
||||
.collect()
|
||||
} else {
|
||||
Vec::new()
|
||||
get_user_user_group_ids(&user.id).await?
|
||||
};
|
||||
|
||||
ExportResourcesToToml {
|
||||
targets,
|
||||
user_groups,
|
||||
include_variables: self.include_variables,
|
||||
include_variables: self.tags.is_empty(),
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
@@ -203,18 +190,18 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
include_variables,
|
||||
} = self;
|
||||
let mut res = ResourcesToml::default();
|
||||
let all = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
let ReadArgs { user } = args;
|
||||
for target in targets {
|
||||
match target {
|
||||
ResourceTarget::Alerter(id) => {
|
||||
let mut alerter = get_check_permissions::<Alerter>(
|
||||
let alerter = resource::get_check_permissions::<Alerter>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Alerter::replace_ids(&mut alerter);
|
||||
res.alerters.push(convert_resource::<Alerter>(
|
||||
alerter,
|
||||
false,
|
||||
@@ -223,18 +210,16 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
let mut sync = get_check_permissions::<ResourceSync>(
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if sync.config.file_contents.is_empty()
|
||||
&& (sync.config.files_on_host
|
||||
|| !sync.config.repo.is_empty()
|
||||
|| !sync.config.linked_repo.is_empty())
|
||||
|| !sync.config.repo.is_empty())
|
||||
{
|
||||
ResourceSync::replace_ids(&mut sync);
|
||||
res.resource_syncs.push(convert_resource::<ResourceSync>(
|
||||
sync,
|
||||
false,
|
||||
@@ -243,14 +228,29 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let mut server = get_check_permissions::<Server>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
let template = resource::get_check_permissions::<
|
||||
ServerTemplate,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
res.server_templates.push(
|
||||
convert_resource::<ServerTemplate>(
|
||||
template,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
),
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Server::replace_ids(&mut server);
|
||||
res.servers.push(convert_resource::<Server>(
|
||||
server,
|
||||
false,
|
||||
@@ -259,13 +259,14 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder = get_check_permissions::<Builder>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Builder::replace_ids(&mut builder);
|
||||
let mut builder =
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Builder::replace_ids(&mut builder, &all);
|
||||
res.builders.push(convert_resource::<Builder>(
|
||||
builder,
|
||||
false,
|
||||
@@ -274,13 +275,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Build::replace_ids(&mut build);
|
||||
Build::replace_ids(&mut build, &all);
|
||||
res.builds.push(convert_resource::<Build>(
|
||||
build,
|
||||
false,
|
||||
@@ -289,13 +290,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
let mut deployment = get_check_permissions::<Deployment>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
let mut deployment = resource::get_check_permissions::<
|
||||
Deployment,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
Deployment::replace_ids(&mut deployment);
|
||||
Deployment::replace_ids(&mut deployment, &all);
|
||||
res.deployments.push(convert_resource::<Deployment>(
|
||||
deployment,
|
||||
false,
|
||||
@@ -304,13 +305,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Repo::replace_ids(&mut repo);
|
||||
Repo::replace_ids(&mut repo, &all);
|
||||
res.repos.push(convert_resource::<Repo>(
|
||||
repo,
|
||||
false,
|
||||
@@ -319,13 +320,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
let mut stack = get_check_permissions::<Stack>(
|
||||
let mut stack = resource::get_check_permissions::<Stack>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Stack::replace_ids(&mut stack);
|
||||
Stack::replace_ids(&mut stack, &all);
|
||||
res.stacks.push(convert_resource::<Stack>(
|
||||
stack,
|
||||
false,
|
||||
@@ -334,13 +335,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
let mut procedure = get_check_permissions::<Procedure>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
let mut procedure = resource::get_check_permissions::<
|
||||
Procedure,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
Procedure::replace_ids(&mut procedure);
|
||||
Procedure::replace_ids(&mut procedure, &all);
|
||||
res.procedures.push(convert_resource::<Procedure>(
|
||||
procedure,
|
||||
false,
|
||||
@@ -349,13 +350,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
));
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
let mut action = get_check_permissions::<Action>(
|
||||
let mut action = resource::get_check_permissions::<Action>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Action::replace_ids(&mut action);
|
||||
Action::replace_ids(&mut action, &all);
|
||||
res.actions.push(convert_resource::<Action>(
|
||||
action,
|
||||
false,
|
||||
@@ -367,7 +368,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
};
|
||||
}
|
||||
|
||||
add_user_groups(user_groups, &mut res, args)
|
||||
add_user_groups(user_groups, &mut res, &all, args)
|
||||
.await
|
||||
.context("failed to add user groups")?;
|
||||
|
||||
@@ -396,6 +397,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
async fn add_user_groups(
|
||||
user_groups: Vec<String>,
|
||||
res: &mut ResourcesToml,
|
||||
all: &AllResourcesById,
|
||||
args: &ReadArgs,
|
||||
) -> anyhow::Result<()> {
|
||||
let user_groups = ListUserGroups {}
|
||||
@@ -407,7 +409,7 @@ async fn add_user_groups(
|
||||
user_groups.contains(&ug.name) || user_groups.contains(&ug.id)
|
||||
});
|
||||
let mut ug = Vec::with_capacity(user_groups.size_hint().0);
|
||||
convert_user_groups(user_groups, &mut ug).await?;
|
||||
convert_user_groups(user_groups, all, &mut ug).await?;
|
||||
res.user_groups = ug.into_iter().map(|ug| ug.1).collect();
|
||||
|
||||
Ok(())
|
||||
@@ -490,6 +492,14 @@ fn serialize_resources_toml(
|
||||
Builder::push_to_toml_string(builder, &mut toml)?;
|
||||
}
|
||||
|
||||
for server_template in resources.server_templates {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[server_template]]\n");
|
||||
ServerTemplate::push_to_toml_string(server_template, &mut toml)?;
|
||||
}
|
||||
|
||||
for resource_sync in resources.resource_syncs {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
@@ -502,14 +512,22 @@ fn serialize_resources_toml(
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str(&variable_to_toml(variable)?);
|
||||
toml.push_str("[[variable]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize variables to toml")?,
|
||||
);
|
||||
}
|
||||
|
||||
for user_group in resources.user_groups {
|
||||
for user_group in &resources.user_groups {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str(&user_group_to_toml(user_group)?);
|
||||
toml.push_str("[[user_group]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(user_group, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize user_groups to toml")?,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(toml)
|
||||
|
||||
@@ -1,15 +1,9 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
action::Action,
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
@@ -19,19 +13,22 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Update, UpdateListItem},
|
||||
user::User,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
permission::{get_check_permissions, list_resource_ids_for_user},
|
||||
state::db_client,
|
||||
};
|
||||
use crate::{config::core_config, resource, state::db_client};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
@@ -45,129 +42,109 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
self.query
|
||||
} else {
|
||||
let server_query = list_resource_ids_for_user::<Server>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
let server_query =
|
||||
resource::get_resource_ids_for_user::<Server>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
|
||||
let deployment_query =
|
||||
list_resource_ids_for_user::<Deployment>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Deployment", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
resource::get_resource_ids_for_user::<Deployment>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Deployment", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
|
||||
let stack_query = list_resource_ids_for_user::<Stack>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
let stack_query =
|
||||
resource::get_resource_ids_for_user::<Stack>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
|
||||
let build_query = list_resource_ids_for_user::<Build>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
let build_query =
|
||||
resource::get_resource_ids_for_user::<Build>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
|
||||
let repo_query = list_resource_ids_for_user::<Repo>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Repo", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
let repo_query =
|
||||
resource::get_resource_ids_for_user::<Repo>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Repo", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
|
||||
let procedure_query = list_resource_ids_for_user::<Procedure>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Procedure", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
let procedure_query =
|
||||
resource::get_resource_ids_for_user::<Procedure>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Procedure", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
|
||||
let action_query = list_resource_ids_for_user::<Action>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Action", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Action" });
|
||||
let action_query =
|
||||
resource::get_resource_ids_for_user::<Action>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Action", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Action" });
|
||||
|
||||
let builder_query = list_resource_ids_for_user::<Builder>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
let builder_query =
|
||||
resource::get_resource_ids_for_user::<Builder>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
|
||||
let alerter_query = list_resource_ids_for_user::<Alerter>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
let alerter_query =
|
||||
resource::get_resource_ids_for_user::<Alerter>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let server_template_query =
|
||||
resource::get_resource_ids_for_user::<ServerTemplate>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ServerTemplate", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
|
||||
|
||||
let resource_sync_query =
|
||||
list_resource_ids_for_user::<ResourceSync>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
resource::get_resource_ids_for_user::<ResourceSync>(
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
@@ -189,6 +166,7 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
action_query,
|
||||
alerter_query,
|
||||
builder_query,
|
||||
server_template_query,
|
||||
resource_sync_query,
|
||||
]
|
||||
});
|
||||
@@ -264,85 +242,93 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
ResourceTarget::System(_) => {
|
||||
return Err(
|
||||
anyhow!("user must be admin to view system updates").into(),
|
||||
);
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
get_check_permissions::<Server>(
|
||||
resource::get_check_permissions::<Server>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
get_check_permissions::<Deployment>(
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
get_check_permissions::<Build>(
|
||||
resource::get_check_permissions::<Build>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
get_check_permissions::<Repo>(
|
||||
resource::get_check_permissions::<Repo>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
get_check_permissions::<Builder>(
|
||||
resource::get_check_permissions::<Builder>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
get_check_permissions::<Alerter>(
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
get_check_permissions::<Procedure>(
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
get_check_permissions::<Action>(
|
||||
resource::get_check_permissions::<Action>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
get_check_permissions::<ResourceSync>(
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
get_check_permissions::<Stack>(
|
||||
resource::get_check_permissions::<Stack>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
@@ -1,9 +1,4 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
|
||||
@@ -11,7 +6,12 @@ use komodo_client::{
|
||||
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
|
||||
ListUsers, ListUsersResponse,
|
||||
},
|
||||
entities::user::{UserConfig, admin_service_user},
|
||||
entities::user::{admin_service_user, UserConfig},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
use komodo_client::api::read::*;
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId},
|
||||
bson::{doc, oid::ObjectId, Document},
|
||||
options::FindOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
find::find_collect, mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_variable, state::db_client};
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use komodo_client::{api::terminal::*, entities::user::User};
|
||||
use serror::Json;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, helpers::terminal::setup_target_for_user,
|
||||
};
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/execute", post(execute_terminal))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
// =================
|
||||
// ExecuteTerminal
|
||||
// =================
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteTerminal",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
target,
|
||||
terminal,
|
||||
init = format!("{init:?}")
|
||||
)
|
||||
)]
|
||||
async fn execute_terminal(
|
||||
Extension(user): Extension<User>,
|
||||
Json(ExecuteTerminalBody {
|
||||
target,
|
||||
terminal,
|
||||
command,
|
||||
init,
|
||||
}): Json<ExecuteTerminalBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("/terminal/execute request | user: {}", user.username);
|
||||
|
||||
let (target, terminal, periphery) =
|
||||
setup_target_for_user(target, terminal, init, &user).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_terminal(target, terminal, command)
|
||||
.await
|
||||
.context("Failed to execute command on Terminal")?;
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
@@ -1,32 +1,26 @@
|
||||
use std::{collections::VecDeque, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
Extension, Json, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_bson,
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Json, Router};
|
||||
use derive_variants::EnumVariants;
|
||||
use komodo_client::entities::random_string;
|
||||
use komodo_client::{
|
||||
api::user::*,
|
||||
entities::{api_key::ApiKey, komodo_timestamp, user::User},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, helpers::query::get_user, state::db_client,
|
||||
auth::auth_request,
|
||||
helpers::{query::get_user, random_string},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
pub struct UserArgs {
|
||||
pub user: User,
|
||||
}
|
||||
@@ -49,22 +43,10 @@ enum UserRequest {
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: UserRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "UserHandler", level = "debug", skip(user))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<UserRequest>,
|
||||
@@ -87,6 +69,11 @@ async fn handler(
|
||||
const RECENTLY_VIEWED_MAX: usize = 10;
|
||||
|
||||
impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
#[instrument(
|
||||
name = "PushRecentlyViewed",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -111,7 +98,7 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
database::mungos::update::Update::Set(update),
|
||||
mungos::update::Update::Set(update),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
@@ -124,6 +111,11 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
}
|
||||
|
||||
impl Resolve<UserArgs> for SetLastSeenUpdate {
|
||||
#[instrument(
|
||||
name = "SetLastSeenUpdate",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -131,7 +123,7 @@ impl Resolve<UserArgs> for SetLastSeenUpdate {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
database::mungos::update::Update::Set(doc! {
|
||||
mungos::update::Update::Set(doc! {
|
||||
"last_update_view": komodo_timestamp()
|
||||
}),
|
||||
None,
|
||||
@@ -146,11 +138,7 @@ const SECRET_LENGTH: usize = 40;
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
impl Resolve<UserArgs> for CreateApiKey {
|
||||
#[instrument(
|
||||
"CreateApiKey",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
#[instrument(name = "CreateApiKey", level = "debug", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -180,11 +168,7 @@ impl Resolve<UserArgs> for CreateApiKey {
|
||||
}
|
||||
|
||||
impl Resolve<UserArgs> for DeleteApiKey {
|
||||
#[instrument(
|
||||
"DeleteApiKey",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
#[instrument(name = "DeleteApiKey", level = "debug", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
|
||||
@@ -6,64 +6,45 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateAction {
|
||||
#[instrument(
|
||||
"CreateAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateAction", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
resource::create::<Action>(&self.name, self.config, None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyAction {
|
||||
#[instrument(
|
||||
"CopyAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.name,
|
||||
copy_action = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyAction", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
let Action { config, .. } = get_check_permissions::<Action>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
let Action { config, .. } =
|
||||
resource::get_check_permissions::<Action>(
|
||||
&self.id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, config.into(), &user)
|
||||
.await?,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Action>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateAction {
|
||||
#[instrument(
|
||||
"UpdateAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateAction", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -73,15 +54,7 @@ impl Resolve<WriteArgs> for UpdateAction {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameAction {
|
||||
#[instrument(
|
||||
"RenameAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameAction", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -91,18 +64,8 @@ impl Resolve<WriteArgs> for RenameAction {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAction {
|
||||
#[instrument(
|
||||
"DeleteAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(resource::delete::<Action>(&self.id, user).await?)
|
||||
#[instrument(name = "DeleteAction", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Action> {
|
||||
Ok(resource::delete::<Action>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{api::write::CloseAlert, entities::NoData};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
impl Resolve<WriteArgs> for CloseAlert {
|
||||
#[instrument(
|
||||
"CloseAlert",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
alert_id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.alerts
|
||||
.update_one(
|
||||
doc! { "_id": ObjectId::from_str(&self.id)? },
|
||||
doc! { "$set": { "resolved": true } },
|
||||
)
|
||||
.await
|
||||
.context("Failed to close Alert on database")?;
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
@@ -6,81 +6,55 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateAlerter {
|
||||
#[instrument(
|
||||
"CreateAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateAlerter", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
resource::create::<Alerter>(&self.name, self.config, None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyAlerter {
|
||||
#[instrument(
|
||||
"CopyAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.name,
|
||||
copy_alerter = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyAlerter", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
let Alerter { config, .. } = get_check_permissions::<Alerter>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
let Alerter { config, .. } =
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
&self.id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Alerter>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAlerter {
|
||||
#[instrument(
|
||||
"DeleteAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteAlerter", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(resource::delete::<Alerter>(&self.id, user).await?)
|
||||
Ok(resource::delete::<Alerter>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateAlerter {
|
||||
#[instrument(
|
||||
"UpdateAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateAlerter", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -93,15 +67,7 @@ impl Resolve<WriteArgs> for UpdateAlerter {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameAlerter {
|
||||
#[instrument(
|
||||
"RenameAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameAlerter", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -1,119 +1,75 @@
|
||||
use std::{path::PathBuf, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::to_document;
|
||||
use database::{
|
||||
mongo_indexed::doc, mungos::mongodb::bson::oid::ObjectId,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use anyhow::{anyhow, Context};
|
||||
use git::GitRes;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
FileContents, NoData, Operation, RepoExecutionArgs,
|
||||
all_logs_success,
|
||||
build::{Build, BuildInfo},
|
||||
builder::{Builder, BuilderConfig},
|
||||
build::{Build, BuildInfo, PartialBuildConfig},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::ServerState,
|
||||
update::Update,
|
||||
CloneArgs, NoData,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
use mongo_indexed::doc;
|
||||
use mungos::mongodb::bson::to_document;
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
connection::PeripheryConnectionArgs,
|
||||
helpers::{
|
||||
git_token, periphery_client,
|
||||
query::get_server_with_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
periphery::PeripheryClient,
|
||||
permission::get_check_permissions,
|
||||
helpers::git_token,
|
||||
resource,
|
||||
state::db_client,
|
||||
state::{db_client, github_client},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuild {
|
||||
#[instrument(
|
||||
"CreateBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateBuild", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
resource::create::<Build>(&self.name, self.config, None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyBuild {
|
||||
#[instrument(
|
||||
"CopyBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.name,
|
||||
copy_build = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyBuild", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
let Build { mut config, .. } = get_check_permissions::<Build>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let Build { mut config, .. } =
|
||||
resource::get_check_permissions::<Build>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
// reset version to 0.0.0
|
||||
config.version = Default::default();
|
||||
resource::create::<Build>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuild {
|
||||
#[instrument(
|
||||
"DeleteBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(resource::delete::<Build>(&self.id, user).await?)
|
||||
#[instrument(name = "DeleteBuild", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Build> {
|
||||
Ok(resource::delete::<Build>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateBuild {
|
||||
#[instrument(
|
||||
"UpdateBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateBuild", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -123,15 +79,7 @@ impl Resolve<WriteArgs> for UpdateBuild {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameBuild {
|
||||
#[instrument(
|
||||
"RenameBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameBuild", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -140,310 +88,76 @@ impl Resolve<WriteArgs> for RenameBuild {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for WriteBuildFileContents {
|
||||
#[instrument(
|
||||
"WriteBuildFileContents",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !build.config.files_on_host
|
||||
&& build.config.repo.is_empty()
|
||||
&& build.config.linked_repo.is_empty()
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"Build is not configured to use Files on Host or Git Repo, can't write dockerfile contents"
|
||||
).into());
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&build, Operation::WriteDockerfile, &args.user);
|
||||
|
||||
update.push_simple_log("Dockerfile to write", &self.contents);
|
||||
|
||||
if build.config.files_on_host {
|
||||
match get_on_host_periphery(&build)
|
||||
.await?
|
||||
.request(WriteDockerfileContentsToHost {
|
||||
name: build.name,
|
||||
build_path: build.config.build_path,
|
||||
dockerfile_path: build.config.dockerfile_path,
|
||||
contents: self.contents,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write dockerfile contents to host")
|
||||
{
|
||||
Ok(log) => {
|
||||
update.logs.push(log);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write Dockerfile Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) =
|
||||
(RefreshBuildCache { build: build.id }).resolve(args).await
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh build cache",
|
||||
format_serror(&e.error.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
} else {
|
||||
write_dockerfile_contents_git(self, args, build, update).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("WriteDockerfileContentsGit", skip_all)]
|
||||
async fn write_dockerfile_contents_git(
|
||||
req: WriteBuildFileContents,
|
||||
args: &WriteArgs,
|
||||
build: Build,
|
||||
mut update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
let WriteBuildFileContents { build: _, contents } = req;
|
||||
|
||||
let mut repo_args: RepoExecutionArgs = if !build
|
||||
.config
|
||||
.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
{
|
||||
(&crate::resource::get::<Repo>(&build.config.linked_repo).await?)
|
||||
.into()
|
||||
} else {
|
||||
(&build).into()
|
||||
};
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let build_path = build
|
||||
.config
|
||||
.build_path
|
||||
.parse::<PathBuf>()
|
||||
.context("Invalid build path")?;
|
||||
let dockerfile_path = build
|
||||
.config
|
||||
.dockerfile_path
|
||||
.parse::<PathBuf>()
|
||||
.context("Invalid dockerfile path")?;
|
||||
|
||||
let full_path = root.join(&build_path).join(&dockerfile_path);
|
||||
|
||||
if let Some(parent) = full_path.parent() {
|
||||
fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize dockerfile parent directory {parent:?}"
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
let access_token = if let Some(account) = &repo_args.account {
|
||||
git_token(&repo_args.provider, account, |https| repo_args.https = https)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Ensure the folder is initialized as git repo.
|
||||
// This allows a new file to be committed on a branch that may not exist.
|
||||
if !root.join(".git").exists() {
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&repo_args,
|
||||
access_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
.await;
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
return Ok(update);
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
)
|
||||
.await
|
||||
.context("Failed to pull latest changes before commit")
|
||||
{
|
||||
Ok((res, _)) => update.logs.extend(res.logs),
|
||||
Err(e) => {
|
||||
update.push_error_log("Pull Repo", format_serror(&e.into()));
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) = secret_file::write_async(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write dockerfile contents to {full_path:?}")
|
||||
})
|
||||
{
|
||||
update
|
||||
.push_error_log("Write Dockerfile", format_serror(&e.into()));
|
||||
} else {
|
||||
update.push_simple_log(
|
||||
"Write Dockerfile",
|
||||
format!("File written to {full_path:?}"),
|
||||
);
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
let commit_res = git::commit_file(
|
||||
&format!("{}: Commit Dockerfile", args.user.username),
|
||||
&root,
|
||||
&build_path.join(&dockerfile_path),
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
update.logs.extend(commit_res.logs);
|
||||
|
||||
if let Err(e) = (RefreshBuildCache { build: build.name })
|
||||
.resolve(args)
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh build cache",
|
||||
format_serror(&e.error.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
#[instrument(
|
||||
name = "RefreshBuildCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// build should be able to do this.
|
||||
let build = get_check_permissions::<Build>(
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let repo = if !build.config.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
if build.config.repo.is_empty()
|
||||
|| build.config.git_provider.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&build.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
// Nothing to do here
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let config = core_config();
|
||||
|
||||
let mut clone_args: CloneArgs = (&build).into();
|
||||
let repo_path =
|
||||
clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
// Don't want to run these on core.
|
||||
clone_args.on_clone = None;
|
||||
clone_args.on_pull = None;
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
clone_args.https = https
|
||||
})
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let RemoteDockerfileContents {
|
||||
path,
|
||||
contents,
|
||||
error,
|
||||
hash,
|
||||
message,
|
||||
} = if build.config.files_on_host {
|
||||
// =============
|
||||
// FILES ON HOST
|
||||
// =============
|
||||
match get_on_host_dockerfile(&build).await {
|
||||
Ok(FileContents { path, contents }) => {
|
||||
RemoteDockerfileContents {
|
||||
path: Some(path),
|
||||
contents: Some(contents),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
Err(e) => RemoteDockerfileContents {
|
||||
error: Some(format_serror(&e.into())),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
} else if let Some(repo) = &repo {
|
||||
let Some(res) = get_git_remote(&build, repo.into()).await?
|
||||
else {
|
||||
// Nothing to do here
|
||||
return Ok(NoData {});
|
||||
};
|
||||
res
|
||||
} else if !build.config.repo.is_empty() {
|
||||
let Some(res) = get_git_remote(&build, (&build).into()).await?
|
||||
else {
|
||||
// Nothing to do here
|
||||
return Ok(NoData {});
|
||||
};
|
||||
res
|
||||
} else {
|
||||
// =============
|
||||
// UI BASED FILE
|
||||
// =============
|
||||
RemoteDockerfileContents::default()
|
||||
};
|
||||
let GitRes {
|
||||
hash: latest_hash,
|
||||
message: latest_message,
|
||||
..
|
||||
} = git::pull_or_clone(
|
||||
clone_args,
|
||||
&config.repo_directory,
|
||||
access_token,
|
||||
&[],
|
||||
"",
|
||||
None,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.context("failed to clone build repo")?;
|
||||
|
||||
let info = BuildInfo {
|
||||
last_built_at: build.info.last_built_at,
|
||||
built_hash: build.info.built_hash,
|
||||
built_message: build.info.built_message,
|
||||
built_contents: build.info.built_contents,
|
||||
remote_path: path,
|
||||
remote_contents: contents,
|
||||
remote_error: error,
|
||||
latest_hash: hash,
|
||||
latest_message: message,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
@@ -462,143 +176,200 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_on_host_periphery(
|
||||
build: &Build,
|
||||
) -> anyhow::Result<PeripheryClient> {
|
||||
if build.config.builder_id.is_empty() {
|
||||
return Err(anyhow!("No builder associated with build"));
|
||||
}
|
||||
|
||||
let builder = resource::get::<Builder>(&build.config.builder_id)
|
||||
.await
|
||||
.context("Failed to get builder")?;
|
||||
|
||||
match builder.config {
|
||||
BuilderConfig::Aws(_) => {
|
||||
Err(anyhow!("Files on host doesn't work with AWS builder"))
|
||||
}
|
||||
BuilderConfig::Url(config) => {
|
||||
// TODO: Ensure connection is actually established.
|
||||
// Builder id no good because it may be active for multiple connections.
|
||||
let periphery = PeripheryClient::new(
|
||||
PeripheryConnectionArgs::from_url_builder(
|
||||
&ObjectId::new().to_hex(),
|
||||
&config,
|
||||
),
|
||||
config.insecure_tls,
|
||||
)
|
||||
.await?;
|
||||
// Poll for connection to be estalished
|
||||
let mut err = None;
|
||||
for _ in 0..10 {
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
match periphery.health_check().await {
|
||||
Ok(_) => return Ok(periphery),
|
||||
Err(e) => err = Some(e),
|
||||
};
|
||||
}
|
||||
Err(err.context("Missing error")?)
|
||||
}
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Builder is type server, but has no server attached"
|
||||
));
|
||||
}
|
||||
let (server, state) =
|
||||
get_server_with_state(&config.server_id).await?;
|
||||
if state != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"Builder server is disabled or not reachable"
|
||||
));
|
||||
};
|
||||
periphery_client(&server).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The successful case will be included as Some(remote_contents).
|
||||
/// The error case will be included as Some(remote_error)
|
||||
async fn get_on_host_dockerfile(
|
||||
build: &Build,
|
||||
) -> anyhow::Result<FileContents> {
|
||||
get_on_host_periphery(build)
|
||||
.await?
|
||||
.request(GetDockerfileContentsOnHost {
|
||||
name: build.name.clone(),
|
||||
build_path: build.config.build_path.clone(),
|
||||
dockerfile_path: build.config.dockerfile_path.clone(),
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_git_remote(
|
||||
build: &Build,
|
||||
mut clone_args: RepoExecutionArgs,
|
||||
) -> anyhow::Result<Option<RemoteDockerfileContents>> {
|
||||
if clone_args.provider.is_empty() {
|
||||
// Nothing to do here
|
||||
return Ok(None);
|
||||
}
|
||||
let config = core_config();
|
||||
let repo_path = clone_args.unique_path(&config.repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
clone_args.https = https
|
||||
})
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (res, _) = git::pull_or_clone(
|
||||
clone_args,
|
||||
&config.repo_directory,
|
||||
access_token,
|
||||
)
|
||||
.await
|
||||
.context("Failed to clone Build repo")?;
|
||||
|
||||
// Ensure clone / pull successful,
|
||||
// propogate error log -> 'errored' and return.
|
||||
if let Some(failure) = res.logs.iter().find(|log| !log.success) {
|
||||
return Ok(Some(RemoteDockerfileContents {
|
||||
path: Some(format!("Failed at: {}", failure.stage)),
|
||||
error: Some(failure.combined()),
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
|
||||
let relative_path = PathBuf::from(&build.config.build_path)
|
||||
.join(&build.config.dockerfile_path);
|
||||
|
||||
let full_path = repo_path.join(&relative_path);
|
||||
let (contents, error) =
|
||||
match fs::read_to_string(&full_path).await.with_context(|| {
|
||||
format!("Failed to read dockerfile contents at {full_path:?}")
|
||||
}) {
|
||||
Ok(contents) => (Some(contents), None),
|
||||
Err(e) => (None, Some(format_serror(&e.into()))),
|
||||
impl Resolve<WriteArgs> for CreateBuildWebhook {
|
||||
#[instrument(name = "CreateBuildWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateBuildWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
Ok(Some(RemoteDockerfileContents {
|
||||
path: Some(relative_path.display().to_string()),
|
||||
contents,
|
||||
error,
|
||||
hash: res.commit_hash,
|
||||
message: res.commit_message,
|
||||
}))
|
||||
|
||||
let WriteArgs { user } = args;
|
||||
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if build.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&build.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !build.config.webhook_enabled {
|
||||
UpdateBuild {
|
||||
id: build.id,
|
||||
config: PartialBuildConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update build to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RemoteDockerfileContents {
|
||||
pub path: Option<String>,
|
||||
pub contents: Option<String>,
|
||||
pub error: Option<String>,
|
||||
pub hash: Option<String>,
|
||||
pub message: Option<String>,
|
||||
impl Resolve<WriteArgs> for DeleteBuildWebhook {
|
||||
#[instrument(name = "DeleteBuildWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteBuildWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't delete webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,81 +6,55 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuilder {
|
||||
#[instrument(
|
||||
"CreateBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateBuilder", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
resource::create::<Builder>(&self.name, self.config, None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyBuilder {
|
||||
#[instrument(
|
||||
"CopyBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.name,
|
||||
copy_builder = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyBuilder", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
let Builder { config, .. } = get_check_permissions::<Builder>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
let Builder { config, .. } =
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, config.into(), &user)
|
||||
.await?,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Builder>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuilder {
|
||||
#[instrument(
|
||||
"DeleteBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteBuilder", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(resource::delete::<Builder>(&self.id, user).await?)
|
||||
Ok(resource::delete::<Builder>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateBuilder {
|
||||
#[instrument(
|
||||
"UpdateBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateBuilder", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -93,19 +67,14 @@ impl Resolve<WriteArgs> for UpdateBuilder {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameBuilder {
|
||||
#[instrument(
|
||||
"RenameBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameBuilder", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(resource::rename::<Builder>(&self.id, &self.name, user).await?)
|
||||
Ok(
|
||||
resource::rename::<Builder>(&self.id, &self.name, &user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
Operation,
|
||||
deployment::{
|
||||
Deployment, DeploymentImage, DeploymentState,
|
||||
PartialDeploymentConfig, RestartMode,
|
||||
@@ -12,10 +10,12 @@ use komodo_client::{
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::{Server, ServerState},
|
||||
to_container_compatible_name,
|
||||
to_komodo_name,
|
||||
update::Update,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use periphery_client::api::{self, container::InspectContainer};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -25,7 +25,6 @@ use crate::{
|
||||
query::get_deployment_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, db_client, server_status_cache},
|
||||
};
|
||||
@@ -33,78 +32,52 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDeployment {
|
||||
#[instrument(
|
||||
"CreateDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateDeployment", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
self.config,
|
||||
None,
|
||||
user,
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyDeployment {
|
||||
#[instrument(
|
||||
"CopyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.name,
|
||||
copy_deployment = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyDeployment", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
let Deployment { config, .. } =
|
||||
get_check_permissions::<Deployment>(
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
Ok(
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
&user,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
#[instrument(
|
||||
"CreateDeploymentFromContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
deployment = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateDeploymentFromContainer", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.inspect().attach(),
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -119,8 +92,7 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let container = periphery_client(&server)
|
||||
.await?
|
||||
let container = periphery_client(&server)?
|
||||
.request(InspectContainer {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
@@ -184,67 +156,46 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
});
|
||||
}
|
||||
|
||||
resource::create::<Deployment>(&self.name, config, None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config, &user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteDeployment {
|
||||
#[instrument(
|
||||
"DeleteDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteDeployment", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(resource::delete::<Deployment>(&self.id, user).await?)
|
||||
Ok(resource::delete::<Deployment>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDeployment {
|
||||
#[instrument(
|
||||
"UpdateDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateDeployment", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(
|
||||
resource::update::<Deployment>(&self.id, self.config, user)
|
||||
resource::update::<Deployment>(&self.id, self.config, &user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameDeployment {
|
||||
#[instrument(
|
||||
"RenameDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameDeployment", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -259,10 +210,9 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.renaming = true)?;
|
||||
|
||||
let name = to_container_compatible_name(&self.name);
|
||||
let name = to_komodo_name(&self.name);
|
||||
|
||||
let container_state =
|
||||
get_deployment_state(&deployment.id).await?;
|
||||
let container_state = get_deployment_state(&deployment).await?;
|
||||
|
||||
if container_state == DeploymentState::Unknown {
|
||||
return Err(
|
||||
@@ -274,12 +224,12 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&deployment, Operation::RenameDeployment, user);
|
||||
make_update(&deployment, Operation::RenameDeployment, &user);
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().deployments,
|
||||
&deployment.id,
|
||||
database::mungos::update::Update::Set(
|
||||
mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
@@ -290,8 +240,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
if container_state != DeploymentState::NotDeployed {
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
let log = periphery_client(&server)
|
||||
.await?
|
||||
let log = periphery_client(&server)?
|
||||
.request(api::container::RenameContainer {
|
||||
curr_name: deployment.name.clone(),
|
||||
new_name: name.clone(),
|
||||
|
||||
123
bin/core/src/api/write/description.rs
Normal file
123
bin/core/src/api/write/description.rs
Normal file
@@ -0,0 +1,123 @@
|
||||
use anyhow::anyhow;
|
||||
use komodo_client::{
|
||||
api::write::{UpdateDescription, UpdateDescriptionResponse},
|
||||
entities::{
|
||||
action::Action, alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate, stack::Stack,
|
||||
sync::ResourceSync, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDescription {
|
||||
#[instrument(name = "UpdateDescription", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateDescriptionResponse> {
|
||||
match self.target {
|
||||
ResourceTarget::System(_) => {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update description of System resource target"
|
||||
)
|
||||
.into(),
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::update_description::<Server>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::update_description::<Deployment>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
resource::update_description::<Build>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::update_description::<Repo>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::update_description::<Builder>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::update_description::<Alerter>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::update_description::<Procedure>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
resource::update_description::<Action>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::update_description::<ServerTemplate>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::update_description::<ResourceSync>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::update_description::<Stack>(
|
||||
&id,
|
||||
&self.description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateDescriptionResponse {})
|
||||
}
|
||||
}
|
||||
@@ -1,40 +1,34 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use strum::Display;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::auth::auth_request;
|
||||
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alert;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod description;
|
||||
mod permissions;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod resource;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod service_user;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod terminal;
|
||||
mod user;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
@@ -47,14 +41,13 @@ pub struct WriteArgs {
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug, Display)]
|
||||
#[variant_derive(Debug)]
|
||||
#[args(WriteArgs)]
|
||||
#[response(Response)]
|
||||
#[error(serror::Error)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
pub enum WriteRequest {
|
||||
// ==== USER ====
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
UpdateUserUsername(UpdateUserUsername),
|
||||
UpdateUserPassword(UpdateUserPassword),
|
||||
DeleteUser(DeleteUser),
|
||||
@@ -72,7 +65,6 @@ pub enum WriteRequest {
|
||||
AddUserToUserGroup(AddUserToUserGroup),
|
||||
RemoveUserFromUserGroup(RemoveUserFromUserGroup),
|
||||
SetUsersInUserGroup(SetUsersInUserGroup),
|
||||
SetEveryoneUserGroup(SetEveryoneUserGroup),
|
||||
|
||||
// ==== PERMISSIONS ====
|
||||
UpdateUserAdmin(UpdateUserAdmin),
|
||||
@@ -80,27 +72,15 @@ pub enum WriteRequest {
|
||||
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
|
||||
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
|
||||
|
||||
// ==== RESOURCE ====
|
||||
UpdateResourceMeta(UpdateResourceMeta),
|
||||
// ==== DESCRIPTION ====
|
||||
UpdateDescription(UpdateDescription),
|
||||
|
||||
// ==== SERVER ====
|
||||
CreateServer(CreateServer),
|
||||
CopyServer(CopyServer),
|
||||
DeleteServer(DeleteServer),
|
||||
UpdateServer(UpdateServer),
|
||||
RenameServer(RenameServer),
|
||||
CreateNetwork(CreateNetwork),
|
||||
UpdateServerPublicKey(UpdateServerPublicKey),
|
||||
RotateServerKeys(RotateServerKeys),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
CreateDeployment(CreateDeployment),
|
||||
@@ -116,8 +96,9 @@ pub enum WriteRequest {
|
||||
DeleteBuild(DeleteBuild),
|
||||
UpdateBuild(UpdateBuild),
|
||||
RenameBuild(RenameBuild),
|
||||
WriteBuildFileContents(WriteBuildFileContents),
|
||||
RefreshBuildCache(RefreshBuildCache),
|
||||
CreateBuildWebhook(CreateBuildWebhook),
|
||||
DeleteBuildWebhook(DeleteBuildWebhook),
|
||||
|
||||
// ==== BUILDER ====
|
||||
CreateBuilder(CreateBuilder),
|
||||
@@ -126,6 +107,13 @@ pub enum WriteRequest {
|
||||
UpdateBuilder(UpdateBuilder),
|
||||
RenameBuilder(RenameBuilder),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
CreateServerTemplate(CreateServerTemplate),
|
||||
CopyServerTemplate(CopyServerTemplate),
|
||||
DeleteServerTemplate(DeleteServerTemplate),
|
||||
UpdateServerTemplate(UpdateServerTemplate),
|
||||
RenameServerTemplate(RenameServerTemplate),
|
||||
|
||||
// ==== REPO ====
|
||||
CreateRepo(CreateRepo),
|
||||
CopyRepo(CopyRepo),
|
||||
@@ -133,6 +121,8 @@ pub enum WriteRequest {
|
||||
UpdateRepo(UpdateRepo),
|
||||
RenameRepo(RenameRepo),
|
||||
RefreshRepoCache(RefreshRepoCache),
|
||||
CreateRepoWebhook(CreateRepoWebhook),
|
||||
DeleteRepoWebhook(DeleteRepoWebhook),
|
||||
|
||||
// ==== ALERTER ====
|
||||
CreateAlerter(CreateAlerter),
|
||||
@@ -164,18 +154,25 @@ pub enum WriteRequest {
|
||||
WriteSyncFileContents(WriteSyncFileContents),
|
||||
CommitSync(CommitSync),
|
||||
RefreshResourceSyncPending(RefreshResourceSyncPending),
|
||||
CreateSyncWebhook(CreateSyncWebhook),
|
||||
DeleteSyncWebhook(DeleteSyncWebhook),
|
||||
|
||||
// ==== TERMINAL ====
|
||||
CreateTerminal(CreateTerminal),
|
||||
DeleteTerminal(DeleteTerminal),
|
||||
DeleteAllTerminals(DeleteAllTerminals),
|
||||
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
CreateStackWebhook(CreateStackWebhook),
|
||||
DeleteStackWebhook(DeleteStackWebhook),
|
||||
|
||||
// ==== TAG ====
|
||||
CreateTag(CreateTag),
|
||||
DeleteTag(DeleteTag),
|
||||
RenameTag(RenameTag),
|
||||
UpdateTagColor(UpdateTagColor),
|
||||
UpdateTagsOnResource(UpdateTagsOnResource),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
CreateVariable(CreateVariable),
|
||||
@@ -184,42 +181,21 @@ pub enum WriteRequest {
|
||||
UpdateVariableIsSecret(UpdateVariableIsSecret),
|
||||
DeleteVariable(DeleteVariable),
|
||||
|
||||
// ==== PROVIDER ====
|
||||
// ==== PROVIDERS ====
|
||||
CreateGitProviderAccount(CreateGitProviderAccount),
|
||||
UpdateGitProviderAccount(UpdateGitProviderAccount),
|
||||
DeleteGitProviderAccount(DeleteGitProviderAccount),
|
||||
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
|
||||
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
|
||||
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
CreateOnboardingKey(CreateOnboardingKey),
|
||||
UpdateOnboardingKey(UpdateOnboardingKey),
|
||||
DeleteOnboardingKey(DeleteOnboardingKey),
|
||||
|
||||
// ==== ALERT ====
|
||||
CloseAlert(CloseAlert),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: WriteRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<WriteRequest>,
|
||||
@@ -230,25 +206,38 @@ async fn handler(
|
||||
.await
|
||||
.context("failure in spawned task");
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/write request {req_id} spawn error: {e:#}");
|
||||
}
|
||||
|
||||
res?
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "WriteRequest",
|
||||
skip(user, request),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
request = format!("{:?}", request.extract_variant())
|
||||
)
|
||||
)]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: WriteRequest,
|
||||
user: User,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let variant = request.extract_variant();
|
||||
info!("/write request | {variant} | user: {}", user.username);
|
||||
info!("/write request | user: {}", user.username);
|
||||
|
||||
let timer = Instant::now();
|
||||
|
||||
let res = request.resolve(&WriteArgs { user }).await;
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!(
|
||||
"/write request {req_id} | {variant} | error: {:#}",
|
||||
e.error
|
||||
);
|
||||
warn!("/write request {req_id} error: {:#}", e.error);
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/write request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res.map(|res| res.0)
|
||||
}
|
||||
|
||||
@@ -1,200 +0,0 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CreateOnboardingKey, CreateOnboardingKeyResponse,
|
||||
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
|
||||
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
|
||||
},
|
||||
entities::{
|
||||
komodo_timestamp, onboarding_key::OnboardingKey, random_string,
|
||||
},
|
||||
};
|
||||
use noise::key::EncodedKeyPair;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCode, AddStatusCodeError};
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateOnboardingKey {
|
||||
#[instrument(
|
||||
"CreateOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
name = self.name,
|
||||
expires = self.expires,
|
||||
tags = format!("{:?}", self.tags),
|
||||
copy_server = self.copy_server,
|
||||
create_builder = self.create_builder,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<CreateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let private_key = if let Some(private_key) = self.private_key {
|
||||
private_key
|
||||
} else {
|
||||
format!("O-{}", random_string(30))
|
||||
};
|
||||
let public_key = EncodedKeyPair::from_private_key(&private_key)?
|
||||
.public
|
||||
.into_inner();
|
||||
let onboarding_key = OnboardingKey {
|
||||
public_key,
|
||||
name: self.name,
|
||||
enabled: true,
|
||||
onboarded: Default::default(),
|
||||
created_at: komodo_timestamp(),
|
||||
expires: self.expires,
|
||||
tags: self.tags,
|
||||
copy_server: self.copy_server,
|
||||
create_builder: self.create_builder,
|
||||
};
|
||||
let db = db_client();
|
||||
// Create the key
|
||||
db.onboarding_keys
|
||||
.insert_one(&onboarding_key)
|
||||
.await
|
||||
.context(
|
||||
"Failed to create Server onboarding key on database",
|
||||
)?;
|
||||
let created = db
|
||||
.onboarding_keys
|
||||
.find_one(doc! { "public_key": &onboarding_key.public_key })
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context(
|
||||
"No Server onboarding key found on database after create",
|
||||
)?;
|
||||
Ok(CreateOnboardingKeyResponse {
|
||||
private_key,
|
||||
created,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateOnboardingKey {
|
||||
#[instrument(
|
||||
"UpdateOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
public_key = self.public_key,
|
||||
update = format!("{:?}", self),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
|
||||
// No changes
|
||||
if self.is_none() {
|
||||
return db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
let mut update = Document::new();
|
||||
|
||||
if let Some(enabled) = self.enabled {
|
||||
update.insert("enabled", enabled);
|
||||
}
|
||||
|
||||
if let Some(name) = self.name {
|
||||
update.insert("name", name);
|
||||
}
|
||||
|
||||
if let Some(expires) = self.expires {
|
||||
update.insert("expires", expires);
|
||||
}
|
||||
|
||||
if let Some(tags) = self.tags {
|
||||
update.insert("tags", tags);
|
||||
}
|
||||
|
||||
if let Some(copy_server) = self.copy_server {
|
||||
update.insert("copy_server", copy_server);
|
||||
}
|
||||
|
||||
if let Some(create_builder) = self.create_builder {
|
||||
update.insert("create_builder", create_builder);
|
||||
}
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.update_one(query.clone(), doc! { "$set": update })
|
||||
.await
|
||||
.context("Failed to update onboarding key on database")?;
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteOnboardingKey {
|
||||
#[instrument(
|
||||
"DeleteOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
public_key = self.public_key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<DeleteOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let db = db_client();
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
let creation_key = db
|
||||
.onboarding_keys
|
||||
.find_one(query.clone())
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context("Server onboarding key matching provided public key not found")
|
||||
.status_code(StatusCode::NOT_FOUND)?;
|
||||
db.onboarding_keys.delete_one(query).await.context(
|
||||
"Failed to delete Server onboarding key from database",
|
||||
)?;
|
||||
Ok(creation_key)
|
||||
}
|
||||
}
|
||||
@@ -1,19 +1,18 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId, to_bson},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use derive_variants::ExtractVariant as _;
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
ResourceTarget, ResourceTargetVariant,
|
||||
permission::{UserTarget, UserTargetVariant},
|
||||
ResourceTarget, ResourceTargetVariant,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{doc, oid::ObjectId, Document},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
@@ -23,15 +22,7 @@ use crate::{helpers::query::get_user, state::db_client};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserAdmin {
|
||||
#[instrument(
|
||||
"UpdateUserAdmin",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = super_admin.id,
|
||||
target_user = self.user_id,
|
||||
admin = self.admin,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateUserAdmin", skip(super_admin))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: super_admin }: &WriteArgs,
|
||||
@@ -69,25 +60,11 @@ impl Resolve<WriteArgs> for UpdateUserAdmin {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
#[instrument(
|
||||
"UpdateUserBasePermissions",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
target_user = self.user_id,
|
||||
enabled = self.enabled,
|
||||
create_servers = self.create_servers,
|
||||
create_builds = self.create_builds,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateUserBasePermissions", skip(admin))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdateUserBasePermissionsResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let UpdateUserBasePermissions {
|
||||
user_id,
|
||||
enabled,
|
||||
@@ -95,6 +72,10 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
create_builds,
|
||||
} = self;
|
||||
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let user = find_one_by_id(&db_client().users, &user_id)
|
||||
.await
|
||||
.context("failed to query mongo for user")?
|
||||
@@ -126,7 +107,7 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user_id,
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
@@ -136,30 +117,21 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
#[instrument(
|
||||
"UpdatePermissionOnResourceType",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
user_target = format!("{:?}", self.user_target),
|
||||
resource_type = self.resource_type.to_string(),
|
||||
permission = format!("{:?}", self.permission),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdatePermissionOnResourceType", skip(admin))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdatePermissionOnResourceTypeResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let Self {
|
||||
let UpdatePermissionOnResourceType {
|
||||
user_target,
|
||||
resource_type,
|
||||
permission,
|
||||
} = self;
|
||||
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
// Some extra checks if user target is an actual User
|
||||
if let UserTarget::User(user_id) = &user_target {
|
||||
let user = get_user(user_id).await?;
|
||||
@@ -181,11 +153,9 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
|
||||
let id = ObjectId::from_str(&user_target_id)
|
||||
.context("id is not ObjectId")?;
|
||||
let filter = doc! { "_id": id };
|
||||
let field = format!("all.{resource_type}");
|
||||
let set =
|
||||
to_bson(&permission).context("permission is not Bson")?;
|
||||
let update = doc! { "$set": { &field: &set } };
|
||||
let filter = doc! { "_id": id };
|
||||
let update = doc! { "$set": { &field: permission.as_ref() } };
|
||||
|
||||
match user_target_variant {
|
||||
UserTargetVariant::User => {
|
||||
@@ -194,7 +164,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
.update_one(filter, update)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to set {field}: {set} on db")
|
||||
format!("failed to set {field}: {permission} on db")
|
||||
})?;
|
||||
}
|
||||
UserTargetVariant::UserGroup => {
|
||||
@@ -203,7 +173,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
.update_one(filter, update)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to set {field}: {set} on db")
|
||||
format!("failed to set {field}: {permission} on db")
|
||||
})?;
|
||||
}
|
||||
}
|
||||
@@ -213,37 +183,24 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
#[instrument(
|
||||
"UpdatePermissionOnTarget",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
user_target = format!("{:?}", self.user_target),
|
||||
resource_type = self.resource_target.extract_variant().to_string(),
|
||||
resource_id = self.resource_target.extract_variant_id().1,
|
||||
permission = format!("{:?}", self.permission),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdatePermissionOnTarget", skip(admin))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdatePermissionOnTargetResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let UpdatePermissionOnTarget {
|
||||
user_target,
|
||||
resource_target,
|
||||
permission,
|
||||
} = self;
|
||||
|
||||
// Some extra checks relevant if user target is an actual User
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
// Some extra checks if user target is an actual User
|
||||
if let UserTarget::User(user_id) = &user_target {
|
||||
let user = get_user(user_id).await?;
|
||||
if !user.enabled {
|
||||
return Err(anyhow!("user not enabled").into());
|
||||
}
|
||||
if user.admin {
|
||||
return Err(
|
||||
anyhow!(
|
||||
@@ -252,6 +209,9 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if !user.enabled {
|
||||
return Err(anyhow!("user not enabled").into());
|
||||
}
|
||||
}
|
||||
|
||||
let (user_target_variant, user_target_id) =
|
||||
@@ -263,9 +223,6 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
let (user_target_variant, resource_variant) =
|
||||
(user_target_variant.as_ref(), resource_variant.as_ref());
|
||||
|
||||
let specific = to_bson(&permission.specific)
|
||||
.context("permission.specific is not valid Bson")?;
|
||||
|
||||
db_client()
|
||||
.permissions
|
||||
.update_one(
|
||||
@@ -281,8 +238,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
"user_target.id": user_target_id,
|
||||
"resource_target.type": resource_variant,
|
||||
"resource_target.id": resource_id,
|
||||
"level": permission.level.as_ref(),
|
||||
"specific": specific
|
||||
"level": permission.as_ref(),
|
||||
}
|
||||
},
|
||||
)
|
||||
@@ -450,6 +406,20 @@ async fn extract_resource_target_with_validation(
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Action, id))
|
||||
}
|
||||
ResourceTarget::ServerTemplate(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.server_templates
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for server templates")?
|
||||
.context("no matching server template found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ServerTemplate, id))
|
||||
}
|
||||
ResourceTarget::ResourceSync(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
|
||||
@@ -6,70 +6,45 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateProcedure {
|
||||
#[instrument(
|
||||
"CreateProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateProcedure", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateProcedureResponse> {
|
||||
resource::create::<Procedure>(&self.name, self.config, None, user)
|
||||
.await
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyProcedure {
|
||||
#[instrument(
|
||||
"CopyProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.name,
|
||||
copy_procedure = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyProcedure", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CopyProcedureResponse> {
|
||||
let Procedure { config, .. } =
|
||||
get_check_permissions::<Procedure>(
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Procedure>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateProcedure {
|
||||
#[instrument(
|
||||
"UpdateProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateProcedure", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -82,15 +57,7 @@ impl Resolve<WriteArgs> for UpdateProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameProcedure {
|
||||
#[instrument(
|
||||
"RenameProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameProcedure", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -103,18 +70,11 @@ impl Resolve<WriteArgs> for RenameProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteProcedure {
|
||||
#[instrument(
|
||||
"DeleteProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "DeleteProcedure", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<DeleteProcedureResponse> {
|
||||
Ok(resource::delete::<Procedure>(&self.id, user).await?)
|
||||
Ok(resource::delete::<Procedure>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,18 +1,16 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
Operation, ResourceTarget,
|
||||
provider::{DockerRegistryAccount, GitProviderAccount},
|
||||
Operation, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
helpers::update::{add_update, make_update},
|
||||
@@ -22,61 +20,45 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
#[instrument(
|
||||
"CreateGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
https = self.account.https.unwrap_or(true),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can create git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
anyhow!("only admins can create git provider accounts")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut account: GitProviderAccount = self.account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Domain cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
return Err(anyhow!("domain cannot be empty string.").into());
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Username cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
return Err(anyhow!("username cannot be empty string.").into());
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::CreateGitProviderAccount,
|
||||
user,
|
||||
&user,
|
||||
);
|
||||
|
||||
account.id = db_client()
|
||||
.git_accounts
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context("Failed to create git provider account on db")?
|
||||
.context("failed to create git provider account on db")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("Inserted id is not ObjectId")?
|
||||
.context("inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"Create git provider account",
|
||||
"create git provider account",
|
||||
format!(
|
||||
"Created git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -88,7 +70,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("Failed to add update for create git provider account | {e:#}")
|
||||
error!("failed to add update for create git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -97,44 +79,33 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
#[instrument(
|
||||
"UpdateGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
https = self.account.https.unwrap_or(true),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
mut self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can update git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
anyhow!("only admins can update git provider accounts")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(domain) = &self.account.domain
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Cannot update git provider with empty domain")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
if let Some(domain) = &self.account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty domain")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(username) = &self.account.username
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Cannot update git provider with empty username")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
if let Some(username) = &self.account.username {
|
||||
if username.is_empty() {
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty username")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure update does not change id
|
||||
@@ -143,11 +114,11 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateGitProviderAccount,
|
||||
user,
|
||||
&user,
|
||||
);
|
||||
|
||||
let account = to_document(&self.account).context(
|
||||
"Failed to serialize partial git provider account to bson",
|
||||
"failed to serialize partial git provider account to bson",
|
||||
)?;
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
@@ -157,17 +128,17 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("Failed to update git provider account on db")?;
|
||||
.context("failed to update git provider account on db")?;
|
||||
|
||||
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
|
||||
.await
|
||||
.context("Failed to query db for git accounts")?
|
||||
.context("failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("No account found with given id").into());
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"Update git provider account",
|
||||
"update git provider account",
|
||||
format!(
|
||||
"Updated git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -179,7 +150,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("Failed to add update for update git provider account | {e:#}")
|
||||
error!("failed to add update for update git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -188,47 +159,36 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
#[instrument(
|
||||
"DeleteGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can delete git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
anyhow!("only admins can delete git provider accounts")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateGitProviderAccount,
|
||||
user,
|
||||
&user,
|
||||
);
|
||||
|
||||
let db = db_client();
|
||||
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
|
||||
.await
|
||||
.context("Failed to query db for git accounts")?
|
||||
.context("failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(
|
||||
anyhow!("No account found with given id")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
};
|
||||
delete_one_by_id(&db.git_accounts, &self.id, None)
|
||||
.await
|
||||
.context("failed to delete git account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"Delete git provider account",
|
||||
"delete git provider account",
|
||||
format!(
|
||||
"Deleted git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -240,7 +200,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("Failed to add update for delete git provider account | {e:#}")
|
||||
error!("failed to add update for delete git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -249,15 +209,6 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"CreateDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -265,32 +216,26 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Only admins can create docker registry account accounts"
|
||||
"only admins can create docker registry account accounts"
|
||||
)
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut account: DockerRegistryAccount = self.account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Domain cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
return Err(anyhow!("domain cannot be empty string.").into());
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Username cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
return Err(anyhow!("username cannot be empty string.").into());
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::CreateDockerRegistryAccount,
|
||||
user,
|
||||
&user,
|
||||
);
|
||||
|
||||
account.id = db_client()
|
||||
@@ -298,15 +243,15 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context(
|
||||
"Failed to create docker registry account account on db",
|
||||
"failed to create docker registry account account on db",
|
||||
)?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("Inserted id is not ObjectId")?
|
||||
.context("inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"Create docker registry account",
|
||||
"create docker registry account",
|
||||
format!(
|
||||
"Created docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -318,7 +263,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("Failed to add update for create docker registry account | {e:#}")
|
||||
error!("failed to add update for create docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -327,47 +272,37 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"UpdateDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
mut self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can update docker registry accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
anyhow!("only admins can update docker registry accounts")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(domain) = &self.account.domain
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot update docker registry account with empty domain"
|
||||
)
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
if let Some(domain) = &self.account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty domain"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(username) = &self.account.username
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot update docker registry account with empty username"
|
||||
if let Some(username) = &self.account.username {
|
||||
if username.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty username"
|
||||
)
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
self.account.id = None;
|
||||
@@ -375,11 +310,11 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateDockerRegistryAccount,
|
||||
user,
|
||||
&user,
|
||||
);
|
||||
|
||||
let account = to_document(&self.account).context(
|
||||
"Failed to serialize partial docker registry account account to bson",
|
||||
"failed to serialize partial docker registry account account to bson",
|
||||
)?;
|
||||
|
||||
let db = db_client();
|
||||
@@ -391,19 +326,19 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"Failed to update docker registry account account on db",
|
||||
"failed to update docker registry account account on db",
|
||||
)?;
|
||||
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.registry_accounts, &self.id)
|
||||
.await
|
||||
.context("Failed to query db for registry accounts")?
|
||||
.context("failed to query db for registry accounts")?
|
||||
else {
|
||||
return Err(anyhow!("No account found with given id").into());
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"Update docker registry account",
|
||||
"update docker registry account",
|
||||
format!(
|
||||
"Updated docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -415,7 +350,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("Failed to add update for update docker registry account | {e:#}")
|
||||
error!("failed to add update for update docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -424,48 +359,37 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"DeleteDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can delete docker registry accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
anyhow!("only admins can delete docker registry accounts")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateDockerRegistryAccount,
|
||||
user,
|
||||
&user,
|
||||
);
|
||||
|
||||
let db = db_client();
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.registry_accounts, &self.id)
|
||||
.await
|
||||
.context("Failed to query db for git accounts")?
|
||||
.context("failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(
|
||||
anyhow!("No account found with given id")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
};
|
||||
delete_one_by_id(&db.registry_accounts, &self.id, None)
|
||||
.await
|
||||
.context("Failed to delete registry account on db")?;
|
||||
.context("failed to delete registry account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"Delete registry account",
|
||||
"delete registry account",
|
||||
format!(
|
||||
"Deleted registry account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -477,7 +401,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("Failed to add update for delete docker registry account | {e:#}")
|
||||
error!("failed to add update for delete docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
|
||||
@@ -1,20 +1,24 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use git::GitRes;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
NoData, Operation, RepoExecutionArgs, komodo_timestamp,
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
repo::{Repo, RepoInfo},
|
||||
repo::{PartialRepoConfig, Repo, RepoInfo},
|
||||
server::Server,
|
||||
to_path_compatible_name,
|
||||
to_komodo_name,
|
||||
update::{Log, Update},
|
||||
CloneArgs, NoData, Operation,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -24,84 +28,51 @@ use crate::{
|
||||
git_token, periphery_client,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, db_client},
|
||||
state::{action_states, db_client, github_client},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateRepo {
|
||||
#[instrument(
|
||||
"CreateRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CreateRepo", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
resource::create::<Repo>(&self.name, self.config, None, user)
|
||||
.await
|
||||
Ok(resource::create::<Repo>(&self.name, self.config, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyRepo {
|
||||
#[instrument(
|
||||
"CopyRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.name,
|
||||
copy_repo = self.id,
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "CopyRepo", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
let Repo { config, .. } = get_check_permissions::<Repo>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
let Repo { config, .. } =
|
||||
resource::get_check_permissions::<Repo>(
|
||||
&self.id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Repo>(&self.name, config.into(), &user)
|
||||
.await?,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Repo>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteRepo {
|
||||
#[instrument(
|
||||
"DeleteRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(resource::delete::<Repo>(&self.id, user).await?)
|
||||
#[instrument(name = "DeleteRepo", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Repo> {
|
||||
Ok(resource::delete::<Repo>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateRepo {
|
||||
#[instrument(
|
||||
"UpdateRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "UpdateRepo", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -111,23 +82,15 @@ impl Resolve<WriteArgs> for UpdateRepo {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameRepo {
|
||||
#[instrument(
|
||||
"RenameRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
#[instrument(name = "RenameRepo", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -135,7 +98,7 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
|| !repo.config.path.is_empty()
|
||||
{
|
||||
return Ok(
|
||||
resource::rename::<Repo>(&repo.id, &self.name, user).await?,
|
||||
resource::rename::<Repo>(&repo.id, &self.name, &user).await?,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -148,14 +111,14 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.renaming = true)?;
|
||||
|
||||
let name = to_path_compatible_name(&self.name);
|
||||
let name = to_komodo_name(&self.name);
|
||||
|
||||
let mut update = make_update(&repo, Operation::RenameRepo, user);
|
||||
let mut update = make_update(&repo, Operation::RenameRepo, &user);
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().repos,
|
||||
&repo.id,
|
||||
database::mungos::update::Update::Set(
|
||||
mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
@@ -166,10 +129,9 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
let log = match periphery_client(&server)?
|
||||
.request(api::git::RenameRepo {
|
||||
curr_name: to_path_compatible_name(&repo.name),
|
||||
curr_name: to_komodo_name(&repo.name),
|
||||
new_name: name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -196,16 +158,21 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
#[instrument(
|
||||
name = "RefreshRepoCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// repo should be able to do this.
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -216,10 +183,13 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let mut clone_args: RepoExecutionArgs = (&repo).into();
|
||||
let mut clone_args: CloneArgs = (&repo).into();
|
||||
let repo_path =
|
||||
clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
// Don't want to run these on core.
|
||||
clone_args.on_clone = None;
|
||||
clone_args.on_pull = None;
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
@@ -233,10 +203,14 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
None
|
||||
};
|
||||
|
||||
let (res, _) = git::pull_or_clone(
|
||||
let GitRes { hash, message, .. } = git::pull_or_clone(
|
||||
clone_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
&[],
|
||||
"",
|
||||
None,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -248,8 +222,8 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
last_built_at: repo.info.last_built_at,
|
||||
built_hash: repo.info.built_hash,
|
||||
built_message: repo.info.built_message,
|
||||
latest_hash: res.commit_hash,
|
||||
latest_message: res.commit_message,
|
||||
latest_hash: hash,
|
||||
latest_message: message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
@@ -267,3 +241,220 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateRepoWebhook {
|
||||
#[instrument(name = "CreateRepoWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateRepoWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
&args.user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if repo.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&repo.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo_name, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
UpdateRepo {
|
||||
id: repo.id,
|
||||
config: PartialRepoConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update repo to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteRepoWebhook {
|
||||
#[instrument(name = "DeleteRepoWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteRepoWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo_name, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user