Compare commits

..

107 Commits

Author SHA1 Message Date
mbecker20
f458106263 batch Build builds 2025-03-13 00:55:29 -04:00
mbecker20
f022e83414 use Tooltip component instead of HoverCard for mobile compatibility 2025-03-13 00:01:26 -04:00
mbecker20
93ccc1ce7f update some deps 2025-03-12 23:39:39 -04:00
mbecker20
d69b7a74e7 isolate stacks / deployments with pending updates 2025-03-12 22:57:34 -04:00
wlatic
96cfaf3355 Update config.tsx (#358) 2025-03-12 22:21:06 -04:00
komodo
b7587e5426 soften tag opacity a bit 2025-03-12 22:21:06 -04:00
komodo
ab874211ed default new tag colors to grey 2025-03-12 22:21:06 -04:00
komodo
7094c4e3c5 fix sync summary count ok 2025-03-12 22:21:06 -04:00
komodo
386d463a0a colored tags 2025-03-12 22:21:05 -04:00
komodo
65fa969686 improve variable value table overflow 2025-03-12 22:21:05 -04:00
komodo
92099f311f use jsonwebtoken 2025-03-12 22:21:05 -04:00
komodo
9186d5fd50 resource sync toggle resource / variable / user group inclusion independantly 2025-03-12 22:21:05 -04:00
komodo
54e766f48b update deps 2025-03-12 22:21:05 -04:00
komodo
a1dd895b19 add KOMODO_LOCK_LOGIN_CREDENTIALS_FOR in config doc 2025-03-12 22:21:05 -04:00
komodo
3f767ed42e cycle the oidc client on interval to ensure up to date JWKs 2025-03-12 22:21:05 -04:00
komodo
4c14c33339 update docs on OIDC and client secret 2025-03-12 22:21:05 -04:00
komodo
ac43572acb OIDC: Support PKCE auth (secret optional) 2025-03-12 22:21:05 -04:00
komodo
c5451bed8e use png in topbar logo, svg quality sometimes bad 2025-03-12 22:21:05 -04:00
komodo
8f568ea8d1 update .devcontainer / dev docs for updated runfile 2025-03-12 22:21:05 -04:00
komodo
dc1062bb57 rename test.compose.yaml to dev.compose.yaml, and update runfile 2025-03-12 22:21:05 -04:00
komodo
a12f21ca7a rust 2024 and fmt 2025-03-12 22:21:05 -04:00
komodo
a0e5a86c89 add update_available query parameter to filter for only stacks /deployments with available update 2025-03-12 22:21:05 -04:00
komodo
2f9cf82cee Fix actions when core on https 2025-03-12 22:21:05 -04:00
komodo
a118696d54 add yarn install to runfile 2025-03-12 22:21:05 -04:00
mbecker20
d6abda063a stack edits on localstorage and show last deployed config 2025-03-12 22:21:05 -04:00
mbecker20
8fb4bea790 store sync edits on localstorage 2025-03-12 22:21:05 -04:00
mbecker20
8cc1d0106a rust 1.85 2025-03-12 22:21:05 -04:00
mbecker20
c0b213dfd4 clean up service_args 2025-03-12 22:21:05 -04:00
mbecker20
5ad00a41ca dev-5 fix the stack service executions 2025-03-12 22:21:05 -04:00
mbecker20
c91e107245 auto update all service deploy option 2025-03-12 22:21:05 -04:00
mbecker20
66a80c1262 Stacks executions take list of services -- Auto update only redeploys services with update 2025-03-12 22:21:05 -04:00
mbecker20
dd0d99dabc resource sync only add escaping on toml between the """ 2025-03-12 22:21:05 -04:00
mbecker20
13a0c5a2fa dev-4 2025-03-12 22:21:05 -04:00
mbecker20
177ce9dd45 update openidconnect dependency, and use reqwest rustls-tls-native-roots 2025-03-12 22:21:05 -04:00
mbecker20
d09a864453 update most deps 2025-03-12 22:21:05 -04:00
mbecker20
29be281f11 don't prune images if server not enabled 2025-03-12 22:21:05 -04:00
mbecker20
adb1a49305 make sure parent directories exist whenever writing files 2025-03-12 22:21:05 -04:00
mbecker20
014d3b2c13 ResourceSync state resolution refinement 2025-03-12 22:21:05 -04:00
mbecker20
42b1f7b6c8 revert to login screen whenever the call to check login fails 2025-03-12 22:21:05 -04:00
mbecker20
6ee707576b lock certain users username / password, prevent demo creds from being changed. 2025-03-12 22:21:05 -04:00
mbecker20
999ad9a4ce fix rand 2025-03-12 22:21:05 -04:00
mbecker20
94afb432f3 fix all clippy lints 2025-03-12 22:21:05 -04:00
mbecker20
48e871d400 standardize running commands with interpolation / output sanitizations 2025-03-12 22:21:05 -04:00
mbecker20
723853e92d Improve resource sync Execute / Pending view selector 2025-03-12 22:21:05 -04:00
mbecker20
24b2a8ab75 ResourceSync: pending view toggle between "Execute" vs "Commit" sync direction 2025-03-12 22:21:05 -04:00
mbecker20
d541c4c202 set branch on git init folder 2025-03-12 22:21:05 -04:00
mbecker20
a09ced896f init sync file new repo 2025-03-12 22:21:05 -04:00
mbecker20
d0890436c3 Stack: Fix git repo new compose file initialization 2025-03-12 22:21:05 -04:00
mbecker20
54447fe56b show provider usernames from config file 2025-03-12 22:21:05 -04:00
mbecker20
625295d50b filters wrap 2025-03-12 22:21:05 -04:00
mbecker20
e23e37ac92 give server stat charts labels 2025-03-12 22:21:05 -04:00
mbecker20
d4e058a532 improve WriteComposeContentsToHost instrument fields 2025-03-12 22:21:05 -04:00
mbecker20
3890a287d1 ServerTemplate description 2025-03-12 22:21:05 -04:00
mbecker20
e169bbbd31 dev-3 2025-03-12 22:21:05 -04:00
mbecker20
d2cb543c76 use komodo_client.subscribe_to_update_websocket, and click indicator to reconnect 2025-03-12 22:21:05 -04:00
mbecker20
59da3812a9 Fix unclear ComposePull log re #244 2025-03-12 22:21:05 -04:00
mbecker20
ce10cbe684 improve pull to git init on existing folder without .git 2025-03-12 22:21:05 -04:00
unsync
dd901f7369 feature: improve tables quick actions on mobile (#312)
* feature: improve tables quick actions on mobile

* review: fix gap4

* review: use flex-wrap
2025-03-12 22:21:05 -04:00
mbecker20
7908149226 choose which stack services to include in logs 2025-03-12 22:21:05 -04:00
mbecker20
0283930207 fix api name chnage 2025-03-12 22:21:05 -04:00
mbecker20
487664a25a 1.17.0-dev-2 2025-03-12 22:21:05 -04:00
mbecker20
130ba1f54f Add all services stack log 2025-03-12 22:21:05 -04:00
mbecker20
2758b91e31 improve update indicator style and also put on home screen 2025-03-12 22:21:05 -04:00
mbecker20
d240b5c959 requery alerts more often 2025-03-12 22:21:05 -04:00
mbecker20
f8b8f76569 FIx PullStack re #302 and record docker compose config on stack deploy 2025-03-12 22:21:05 -04:00
mbecker20
d2b27294be improve First Login docs 2025-03-12 22:21:05 -04:00
unsync
c44313a9f1 feature: allow docker image text to overflow in table (#301)
* feature: allow docker image text to overflow in table

* review: use break-words

* wip: revert line break in css file

* feature: update devcontainer node release
2025-03-12 22:21:05 -04:00
mbecker20
1cc967f215 add save button to config bottom 2025-03-12 22:21:05 -04:00
mbecker20
53fcd899a4 add config save button in desktop sidebar navigator 2025-03-12 22:21:05 -04:00
mbecker20
b296971c1a add donate button docsite 2025-03-12 22:21:05 -04:00
mbecker20
6a4c88f8f4 typescript subscribe_to_update_websocket 2025-03-12 22:21:05 -04:00
mbecker20
51992c477d docs new organization 2025-03-12 22:21:05 -04:00
mbecker20
2c98c6ea40 fix new compose images 2025-03-12 22:21:05 -04:00
mbecker20
14658ba722 more legible favicon 2025-03-12 22:21:05 -04:00
mbecker20
192073a12c fix login screen logo 2025-03-12 22:21:05 -04:00
mbecker20
275f204a30 dev-1 2025-03-12 22:21:05 -04:00
mbecker20
a196e1ff7f remove example from cargo toml workspace 2025-03-12 22:21:05 -04:00
mbecker20
3ef5367ed1 mbecker20 -> moghtech 2025-03-12 22:21:05 -04:00
Maxwell Becker
ca2c728bf3 Remove .git from remote_url (#299)
Remove .git from remote_url

Co-authored-by: Deon Marshall <dmarshall@ccp.com.au>
2025-03-12 22:21:05 -04:00
unsync
f0d22642b2 feature: interpolate secrets in custom alerter (#289)
* feature: interpolate secrets in custom alerter

* fix rust warning

* review: sanitize errors

* review: sanitize error message
2025-03-12 22:21:05 -04:00
unsync
5d54876fff feature: add post_deploy command (#288)
* feature: add post_deploy command

* review: do not run post_deploy if deploy failed
2025-03-12 22:21:05 -04:00
mbecker20
c3c2f57db4 1.17.0-dev 2025-03-12 22:21:05 -04:00
unsync
30043e32a4 feature: use the repo path instead of name in GetLatestCommit (#282)
* Update repo path handling in commit fetching

- Changed `name` to `path` for repository identification.
- Updated cache update function to use the new path field.
- Improved error message for non-directory repo paths.

* feat: use optional name and path in GetLatestCommit

* review: don't use optional for name

* review: use helper

* review: remove redundant to_string()
2025-03-12 22:21:05 -04:00
mbecker20
33a2897c2c update available deployment table 2025-03-12 22:21:05 -04:00
mbecker20
2ae1313170 show update available stack table 2025-03-12 22:21:05 -04:00
mbecker20
b1e38714cf finish oidc comment 2025-03-12 22:21:05 -04:00
mbecker20
43fe613f85 clean up rust client websocket subscription 2025-03-12 22:21:05 -04:00
mbecker20
6a8f46ee7a escape incoming sync backslashes (BREAKING) 2025-03-12 22:21:05 -04:00
mbecker20
cbb323fce5 rename Test Alerter button 2025-03-12 22:21:05 -04:00
mbecker20
4d85c601d3 simplify network stats 2025-03-12 22:21:05 -04:00
mbecker20
b03a4db3ce komodo-logo 2025-03-12 22:21:05 -04:00
mbecker20
a19f2afc74 higher quality / colored icons 2025-03-12 22:21:05 -04:00
mbecker20
b55e5c584b Add test alerter button 2025-03-12 22:21:05 -04:00
mbecker20
4d1a975c79 fix last axum updates 2025-03-12 22:21:05 -04:00
mbecker20
f45fb6f635 axum update :param to {param} syntax 2025-03-12 22:21:05 -04:00
mbecker20
5c6e09a48c rust 1.84.0 2025-03-12 22:21:05 -04:00
mbecker20
6ffb421662 test alert implementation 2025-03-12 22:21:05 -04:00
mbecker20
85f1cc699c add entities / message for test alerter 2025-03-12 22:21:05 -04:00
mbecker20
3c4e0b23df the komodo env file should be highest priority over additional files 2025-03-12 22:21:05 -04:00
mbecker20
a524c3ac2d clean up cors 2025-03-12 22:21:05 -04:00
mbecker20
e89a5bface just make it 1.17.0 2025-03-12 22:21:05 -04:00
mbecker20
d67130f6ee bump aws deps 2025-03-12 22:21:05 -04:00
mbecker20
d76736b71e axum to 0.8 2025-03-12 22:21:05 -04:00
mbecker20
c3857e93b6 resource2 not really a benefit 2025-03-12 22:21:05 -04:00
mbecker20
c879c393be format 2025-03-12 22:21:05 -04:00
mbecker20
7bae90b661 fmt 2025-03-12 22:21:05 -04:00
mbecker20
bef9d9397c resolver v3
add new ec2 instance types

clean up testing config

document the libraries a bit

clean up main

update sysinfo and otel

update client resolver 3.0

resolver v3 prog

clean up gitignore

implement periphery resolver v3

clean up

core read api v3

more prog

execute api

missing apis

compiling

1.16.13

work on more granular traits

prog on crud
2025-03-12 22:21:05 -04:00
329 changed files with 11420 additions and 25693 deletions

1712
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,7 +8,7 @@ members = [
]
[workspace.package]
version = "1.18.0"
version = "1.17.0-dev-7"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -30,7 +30,7 @@ git = { path = "lib/git" }
# MOGH
run_command = { version = "0.0.6", features = ["async_tokio"] }
serror = { version = "0.5.0", default-features = false }
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
slack = { version = "0.3.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
derive_default_builder = "0.1.8"
derive_empty_traits = "0.1.0"
merge_config_files = "0.1.5"
@@ -44,86 +44,75 @@ mungos = "3.2.0"
svi = "1.0.1"
# ASYNC
reqwest = { version = "0.12.15", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
tokio = { version = "1.45.1", features = ["full"] }
tokio-util = { version = "0.7.15", features = ["io", "codec"] }
tokio-stream = { version = "0.1.17", features = ["sync"] }
pin-project-lite = "0.2.16"
reqwest = { version = "0.12.12", default-features = false, features = ["json", "rustls-tls-native-roots"] }
tokio = { version = "1.44.0", features = ["full"] }
tokio-util = "0.7.13"
futures = "0.3.31"
futures-util = "0.3.31"
arc-swap = "1.7.1"
# SERVER
tokio-tungstenite = { version = "0.26.2", features = ["rustls-tls-native-roots"] }
axum-extra = { version = "0.10.1", features = ["typed-header"] }
tower-http = { version = "0.6.4", features = ["fs", "cors"] }
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
axum-extra = { version = "0.10.0", features = ["typed-header"] }
tower-http = { version = "0.6.2", features = ["fs", "cors"] }
axum-server = { version = "0.7.1", features = ["tls-rustls"] }
axum = { version = "0.8.1", features = ["ws", "json", "macros"] }
tokio-tungstenite = "0.26.2"
# SER/DE
indexmap = { version = "2.9.0", features = ["serde"] }
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
strum = { version = "0.27.1", features = ["derive"] }
serde_json = "1.0.140"
serde_yaml = "0.9.34"
serde_qs = "0.15.0"
toml = "0.8.22"
toml = "0.8.20"
# ERROR
anyhow = "1.0.98"
anyhow = "1.0.97"
thiserror = "2.0.12"
# LOGGING
opentelemetry-otlp = { version = "0.29.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.29.0", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.28.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.28.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.19", features = ["json"] }
opentelemetry-semantic-conventions = "0.29.0"
tracing-opentelemetry = "0.30.0"
opentelemetry = "0.29.1"
opentelemetry-semantic-conventions = "0.28.0"
tracing-opentelemetry = "0.29.0"
opentelemetry = "0.28.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.38", features = ["derive"] }
clap = { version = "4.5.32", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.17.0", features = ["v4", "fast-rng", "serde"] }
uuid = { version = "1.15.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "9.3.1", default-features = false }
openidconnect = "4.0.0"
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.17.0"
base64 = "0.22.1"
rustls = "0.23.27"
rustls = "0.23.23"
hmac = "0.12.1"
sha2 = "0.10.9"
rand = "0.9.1"
sha2 = "0.10.8"
rand = "0.9.0"
hex = "0.4.3"
# SYSTEM
portable-pty = "0.9.0"
bollard = "0.19.0"
sysinfo = "0.35.1"
bollard = "0.18.1"
sysinfo = "0.33.1"
# CLOUD
aws-config = "1.6.3"
aws-sdk-ec2 = "1.134.0"
aws-credential-types = "1.2.3"
## CRON
english-to-cron = "0.1.6"
chrono-tz = "0.10.3"
chrono = "0.4.41"
croner = "2.1.0"
aws-config = "1.6.0"
aws-sdk-ec2 = "1.117.0"
aws-credential-types = "1.2.2"
# MISC
derive_builder = "0.20.2"
typeshare = "1.0.4"
octorust = "0.10.0"
octorust = "0.9.0"
dashmap = "6.1.0"
wildcard = "0.3.0"
colored = "3.0.0"
regex = "1.11.1"
bytes = "1.10.1"
bson = "2.15.0"
bson = "2.13.0"

View File

@@ -1,7 +1,7 @@
## Builds the Komodo Core, Periphery, and Util binaries
## Builds the Komodo Core and Periphery binaries
## for a specific architecture.
FROM rust:1.87.0-bullseye AS builder
FROM rust:1.85.0-bullseye AS builder
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./
@@ -10,21 +10,18 @@ COPY ./client/core/rs ./client/core/rs
COPY ./client/periphery ./client/periphery
COPY ./bin/core ./bin/core
COPY ./bin/periphery ./bin/periphery
COPY ./bin/util ./bin/util
# Compile bin
RUN \
cargo build -p komodo_core --release && \
cargo build -p komodo_periphery --release && \
cargo build -p komodo_util --release
cargo build -p komodo_periphery --release
# Copy just the binaries to scratch image
FROM scratch
COPY --from=builder /builder/target/release/core /core
COPY --from=builder /builder/target/release/periphery /periphery
COPY --from=builder /builder/target/release/util /util
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
LABEL org.opencontainers.image.description="Komodo Binaries"
LABEL org.opencontainers.image.description="Komodo Periphery"
LABEL org.opencontainers.image.licenses=GPL-3.0

View File

@@ -185,9 +185,6 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::PullStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BatchPullStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -432,10 +429,6 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
.execute(request)
.await
.map(ExecutionResult::Single),
Execution::BatchPullStack(request) => komodo_client()
.execute(request)
.await
.map(ExecutionResult::Batch),
Execution::StartStack(request) => komodo_client()
.execute(request)
.await

View File

@@ -38,10 +38,8 @@ slack.workspace = true
svi.workspace = true
# external
aws-credential-types.workspace = true
tokio-tungstenite.workspace = true
english-to-cron.workspace = true
ordered_hash_map.workspace = true
openidconnect.workspace = true
jsonwebtoken.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
@@ -52,8 +50,6 @@ tower-http.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
typeshare.workspace = true
chrono-tz.workspace = true
indexmap.workspace = true
octorust.workspace = true
wildcard.workspace = true
arc-swap.workspace = true
@@ -64,8 +60,6 @@ futures.workspace = true
nom_pem.workspace = true
dotenvy.workspace = true
anyhow.workspace = true
croner.workspace = true
chrono.workspace = true
bcrypt.workspace = true
base64.workspace = true
rustls.workspace = true
@@ -79,4 +73,5 @@ envy.workspace = true
rand.workspace = true
hmac.workspace = true
sha2.workspace = true
jsonwebtoken.workspace = true
hex.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.87.0-bullseye AS core-builder
FROM rust:1.85.0-bullseye AS core-builder
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./
@@ -24,9 +24,10 @@ RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:bullseye-slim
COPY ./bin/core/starship.toml /config/starship.toml
COPY ./bin/core/debian-deps.sh .
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
# Install Deps
RUN apt update && \
apt install -y git ca-certificates && \
rm -rf /var/lib/apt/lists/*
# Setup an application directory
WORKDIR /app

View File

@@ -1,14 +0,0 @@
#!/bin/bash
## Core deps installer
apt-get update
apt-get install -y git curl ca-certificates
rm -rf /var/lib/apt/lists/*
# Starship prompt
curl -sS https://starship.rs/install.sh | sh -s -- --yes --bin-dir /usr/local/bin
echo 'export STARSHIP_CONFIG=/config/starship.toml' >> /root/.bashrc
echo 'eval "$(starship init bash)"' >> /root/.bashrc

View File

@@ -15,9 +15,10 @@ FROM ${FRONTEND_IMAGE} AS frontend
# Final Image
FROM debian:bullseye-slim
COPY ./bin/core/starship.toml /config/starship.toml
COPY ./bin/core/debian-deps.sh .
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
# Install Deps
RUN apt update && \
apt install -y git ca-certificates && \
rm -rf /var/lib/apt/lists/*
WORKDIR /app

View File

@@ -16,9 +16,10 @@ RUN cd frontend && yarn link komodo_client && yarn && yarn build
FROM debian:bullseye-slim
COPY ./bin/core/starship.toml /config/starship.toml
COPY ./bin/core/debian-deps.sh .
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
# Install Deps
RUN apt update && \
apt install -y git ca-certificates && \
rm -rf /var/lib/apt/lists/*
# Copy
COPY ./config/core.config.toml /config/config.toml

View File

@@ -189,52 +189,10 @@ pub async fn send_alert(
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for **{name}** failed\n{link}")
}
AlertData::ProcedureFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Procedure, id);
format!("{level} | Procedure **{name}** failed\n{link}")
}
AlertData::ActionFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Action, id);
format!("{level} | Action **{name}** failed\n{link}")
}
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let link = resource_link(*resource_type, id);
format!(
"{level} | **{name}** ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut url_interpolated = url.to_string();
// interpolate variables and secrets into the url
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut url_interpolated,
&mut global_replacers,
&mut secret_replacers,
)?;
send_message(&url_interpolated, &content)
.await
.map_err(|e| {
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {}",
sanitized_error
))
})?;
send_message(url, &content).await?;
}
Ok(())
}

View File

@@ -18,8 +18,6 @@ use crate::helpers::query::get_variables_and_secrets;
use crate::{config::core_config, state::db_client};
mod discord;
mod ntfy;
mod pushover;
mod slack;
#[instrument(level = "debug")]
@@ -130,24 +128,6 @@ pub async fn send_alert_to_alerter(
)
})
}
AlerterEndpoint::Ntfy(NtfyAlerterEndpoint { url, email }) => {
ntfy::send_alert(url, email.as_deref(), alert)
.await
.with_context(|| {
format!(
"Failed to send alert to ntfy Alerter {}",
alerter.name
)
})
}
AlerterEndpoint::Pushover(PushoverAlerterEndpoint { url }) => {
pushover::send_alert(url, alert).await.with_context(|| {
format!(
"Failed to send alert to Pushover Alerter {}",
alerter.name
)
})
}
}
}
@@ -262,6 +242,9 @@ fn resource_link(
ResourceTargetVariant::Action => {
format!("/actions/{id}")
}
ResourceTargetVariant::ServerTemplate => {
format!("/server-templates/{id}")
}
ResourceTargetVariant::ResourceSync => {
format!("/resource-syncs/{id}")
}

View File

@@ -1,272 +0,0 @@
use std::sync::OnceLock;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
email: Option<&str>,
alert: &Alert,
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let content = match &alert.data {
AlertData::Test { id, name } => {
let link = resource_link(ResourceTargetVariant::Alerter, id);
format!(
"{level} | If you see this message, then Alerter {} is working\n{link}",
name,
)
}
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | {}{} is now reachable\n{link}",
name, region
)
}
SeverityLevel::Critical => {
let err = err
.as_ref()
.map(|e| format!("\nerror: {:#?}", e))
.unwrap_or_default();
format!(
"{level} | {}{} is unreachable ❌\n{link}{err}",
name, region
)
}
_ => unreachable!(),
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
format!(
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
name, region,
)
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region,
)
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region, path,
)
}
AlertData::ContainerStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to_state = fmt_docker_container_state(to);
format!(
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::StackStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to_state = fmt_stack_state(to);
format!(
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!(
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
name, server_name, service, image,
)
}
AlertData::StackAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
images,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images_str = images.join(", ");
format!(
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
name, server_name, images_label, images_str,
)
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!(
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
instance_id, message,
)
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
resource_link(ResourceTargetVariant::ResourceSync, id);
format!(
"{level} | Pending resource sync updates on {}\n{link}",
name,
)
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!(
"{level} | Build {} failed\nversion: v{}\n{link}",
name, version,
)
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for {} failed\n{link}", name,)
}
AlertData::ProcedureFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Procedure, id);
format!("{level} | Procedure {name} failed\n{link}")
}
AlertData::ActionFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Action, id);
format!("{level} | Action {name} failed\n{link}")
}
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let link = resource_link(*resource_type, id);
format!(
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
send_message(url, email, content).await?;
}
Ok(())
}
async fn send_message(
url: &str,
email: Option<&str>,
content: String,
) -> anyhow::Result<()> {
let mut request = http_client()
.post(url)
.header("Title", "ntfy Alert")
.body(content);
if let Some(email) = email {
request = request.header("X-Email", email);
}
let response =
request.send().await.context("Failed to send message")?;
let status = response.status();
if status.is_success() {
debug!("ntfy alert sent successfully: {}", status);
Ok(())
} else {
let text = response.text().await.with_context(|| {
format!(
"Failed to send message to ntfy | {} | failed to get response text",
status
)
})?;
Err(anyhow!(
"Failed to send message to ntfy | {} | {}",
status,
text
))
}
}
fn http_client() -> &'static reqwest::Client {
static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
CLIENT.get_or_init(reqwest::Client::new)
}

View File

@@ -1,270 +0,0 @@
use std::sync::OnceLock;
use super::*;
#[instrument(level = "debug")]
pub async fn send_alert(
url: &str,
alert: &Alert,
) -> anyhow::Result<()> {
let level = fmt_level(alert.level);
let content = match &alert.data {
AlertData::Test { id, name } => {
let link = resource_link(ResourceTargetVariant::Alerter, id);
format!(
"{level} | If you see this message, then Alerter {} is working\n{link}",
name,
)
}
AlertData::ServerUnreachable {
id,
name,
region,
err,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
match alert.level {
SeverityLevel::Ok => {
format!(
"{level} | {}{} is now reachable\n{link}",
name, region
)
}
SeverityLevel::Critical => {
let err = err
.as_ref()
.map(|e| format!("\nerror: {:#?}", e))
.unwrap_or_default();
format!(
"{level} | {}{} is unreachable ❌\n{link}{err}",
name, region
)
}
_ => unreachable!(),
}
}
AlertData::ServerCpu {
id,
name,
region,
percentage,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
format!(
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
name, region,
)
}
AlertData::ServerMem {
id,
name,
region,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region,
)
}
AlertData::ServerDisk {
id,
name,
region,
path,
used_gb,
total_gb,
} => {
let region = fmt_region(region);
let link = resource_link(ResourceTargetVariant::Server, id);
let percentage = 100.0 * used_gb / total_gb;
format!(
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
name, region, path,
)
}
AlertData::ContainerStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to_state = fmt_docker_container_state(to);
format!(
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::DeploymentAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
name, server_name, image,
)
}
AlertData::StackStateChange {
id,
name,
server_id: _server_id,
server_name,
from,
to,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to_state = fmt_stack_state(to);
format!(
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
name, to_state, server_name, from,
)
}
AlertData::StackImageUpdateAvailable {
id,
name,
server_id: _server_id,
server_name,
service,
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!(
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
name, server_name, service, image,
)
}
AlertData::StackAutoUpdated {
id,
name,
server_id: _server_id,
server_name,
images,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images_str = images.join(", ");
format!(
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
name, server_name, images_label, images_str,
)
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!(
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
instance_id, message,
)
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
resource_link(ResourceTargetVariant::ResourceSync, id);
format!(
"{level} | Pending resource sync updates on {}\n{link}",
name,
)
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!(
"{level} | Build {name} failed\nversion: v{version}\n{link}",
)
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);
format!("{level} | Repo build for {} failed\n{link}", name,)
}
AlertData::ProcedureFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Procedure, id);
format!("{level} | Procedure {name} failed\n{link}")
}
AlertData::ActionFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Action, id);
format!("{level} | Action {name} failed\n{link}")
}
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let link = resource_link(*resource_type, id);
format!(
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
)
}
AlertData::None {} => Default::default(),
};
if !content.is_empty() {
send_message(url, content).await?;
}
Ok(())
}
/// POST a single alert message to the configured Pushover webhook URL.
///
/// Pushover expects all routing information to be encoded in the URL
/// itself: at minimum the user key and the application token. Optional
/// params (see https://pushover.net/api) can be appended to the webhook
/// URL the same way. The message text travels as a form field.
async fn send_message(
  url: &str,
  content: String,
) -> anyhow::Result<()> {
  let form_body = [("message", content)];
  let response = http_client()
    .post(url)
    .form(&form_body)
    .send()
    .await
    .context("Failed to send message")?;
  let status = response.status();
  if !status.is_success() {
    // Pull the response body for diagnostics; failing to read it is
    // itself reported as an error with the status attached.
    let body = response.text().await.with_context(|| {
      format!(
        "Failed to send message to pushover | {} | failed to get response text",
        status
      )
    })?;
    return Err(anyhow!(
      "Failed to send message to pushover | {} | {}",
      status,
      body
    ));
  }
  debug!("pushover alert sent successfully: {}", status);
  Ok(())
}
/// Shared process-wide HTTP client, lazily initialized on first use so
/// all pushover requests reuse the same connection pool.
fn http_client() -> &'static reqwest::Client {
  static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
  CLIENT.get_or_init(Default::default)
}

View File

@@ -373,7 +373,9 @@ pub async fn send_alert(
let text = format!("{level} | Build {name} has failed");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("version: *v{version}*",)),
Block::section(format!(
"build name: *{name}*\nversion: *v{version}*",
)),
Block::section(resource_link(
ResourceTargetVariant::Build,
id,
@@ -386,6 +388,7 @@ pub async fn send_alert(
format!("{level} | Repo build for *{name}* has *failed*");
let blocks = vec![
Block::header(text.clone()),
Block::section(format!("repo name: *{name}*",)),
Block::section(resource_link(
ResourceTargetVariant::Repo,
id,
@@ -393,69 +396,11 @@ pub async fn send_alert(
];
(text, blocks.into())
}
AlertData::ProcedureFailed { id, name } => {
let text = format!("{level} | Procedure *{name}* has *failed*");
let blocks = vec![
Block::header(text.clone()),
Block::section(resource_link(
ResourceTargetVariant::Procedure,
id,
)),
];
(text, blocks.into())
}
AlertData::ActionFailed { id, name } => {
let text = format!("{level} | Action *{name}* has *failed*");
let blocks = vec![
Block::header(text.clone()),
Block::section(resource_link(
ResourceTargetVariant::Action,
id,
)),
];
(text, blocks.into())
}
AlertData::ScheduleRun {
resource_type,
id,
name,
} => {
let text = format!(
"{level} | *{name}* ({resource_type}) | Scheduled run started 🕝"
);
let blocks = vec![
Block::header(text.clone()),
Block::section(resource_link(*resource_type, id)),
];
(text, blocks.into())
}
AlertData::None {} => Default::default(),
};
if !text.is_empty() {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut url_interpolated = url.to_string();
// interpolate variables and secrets into the url
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut url_interpolated,
&mut global_replacers,
&mut secret_replacers,
)?;
let slack = ::slack::Client::new(url_interpolated);
slack.send_message(text, blocks).await.map_err(|e| {
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with slack request: {}",
sanitized_error
))
})?;
let slack = ::slack::Client::new(url);
slack.send_message(text, blocks).await?;
}
Ok(())
}

View File

@@ -1,12 +1,11 @@
use std::{sync::OnceLock, time::Instant};
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
use axum::{Router, http::HeaderMap, routing::post};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::auth::*, entities::user::User};
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
@@ -23,8 +22,6 @@ use crate::{
state::jwt_client,
};
use super::Variant;
pub struct AuthArgs {
pub headers: HeaderMap,
}
@@ -48,9 +45,7 @@ pub enum AuthRequest {
}
pub fn router() -> Router {
let mut router = Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler));
let mut router = Router::new().route("/", post(handler));
if core_config().local_auth {
info!("🔑 Local Login Enabled");
@@ -74,18 +69,6 @@ pub fn router() -> Router {
router
}
async fn variant_handler(
headers: HeaderMap,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> serror::Result<axum::response::Response> {
let req: AuthRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
}))?;
handler(headers, Json(req)).await
}
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
async fn handler(
headers: HeaderMap,

View File

@@ -13,13 +13,8 @@ use komodo_client::{
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
},
entities::{
action::Action,
alert::{Alert, AlertData, SeverityLevel},
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
update::Update,
user::action_user,
action::Action, config::core::CoreConfig,
permission::PermissionLevel, update::Update, user::action_user,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
@@ -27,7 +22,6 @@ use resolver_api::Resolve;
use tokio::fs;
use crate::{
alert::send_alerts,
api::{execute::ExecuteRequest, user::UserArgs},
config::core_config,
helpers::{
@@ -39,8 +33,7 @@ use crate::{
random_string,
update::update_update,
},
permission::get_check_permissions,
resource::refresh_action_state_cache,
resource::{self, refresh_action_state_cache},
state::{action_states, db_client},
};
@@ -72,10 +65,10 @@ impl Resolve<ExecuteArgs> for RunAction {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut action = get_check_permissions::<Action>(
let mut action = resource::get_check_permissions::<Action>(
&self.action,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -135,18 +128,12 @@ impl Resolve<ExecuteArgs> for RunAction {
""
};
let reload = if action.config.reload_deno_deps {
" --reload"
} else {
""
};
let mut res = run_komodo_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
format!(
"deno run --allow-all{https_cert_flag}{reload} {}",
"deno run --allow-all{https_cert_flag} {}",
path.display()
),
)
@@ -191,26 +178,6 @@ impl Resolve<ExecuteArgs> for RunAction {
update_update(update.clone()).await?;
if !update.success && action.config.failure_alert {
warn!("action unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::ActionFailed {
id: action.id,
name: action.name,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
}
}
@@ -252,7 +219,7 @@ fn full_contents(contents: &str, key: &str, secret: &str) -> String {
let protocol = if *ssl_enabled { "https" } else { "http" };
let base_url = format!("{protocol}://localhost:{port}");
format!(
"import {{ KomodoClient, Types }} from '{base_url}/client/lib.js';
"import {{ KomodoClient }} from '{base_url}/client/lib.js';
import * as __YAML__ from 'jsr:@std/yaml';
import * as __TOML__ from 'jsr:@std/toml';
@@ -288,7 +255,7 @@ main()
console.error('Status:', error.status);
console.error(JSON.stringify(error.result, null, 2));
}} else {{
console.error(error);
console.error(JSON.stringify(error, null, 2));
}}
Deno.exit(1)
}});"

View File

@@ -12,7 +12,7 @@ use resolver_api::Resolve;
use crate::{
alert::send_alert_to_alerter, helpers::update::update_update,
permission::get_check_permissions,
resource::get_check_permissions,
};
use super::ExecuteArgs;
@@ -26,7 +26,7 @@ impl Resolve<ExecuteArgs> for TestAlerter {
let alerter = get_check_permissions::<Alerter>(
&self.alerter,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;

View File

@@ -48,7 +48,6 @@ use crate::{
registry_token,
update::{init_execution_update, update_update},
},
permission::get_check_permissions,
resource::{self, refresh_build_state_cache},
state::{action_states, db_client},
};
@@ -81,19 +80,13 @@ impl Resolve<ExecuteArgs> for RunBuild {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut build = get_check_permissions::<Build>(
let mut build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
let mut vars_and_secrets = get_variables_and_secrets().await?;
// Add the $VERSION to variables. Use with [[$VERSION]]
vars_and_secrets.variables.insert(
String::from("$VERSION"),
build.config.version.to_string(),
);
if build.config.builder_id.is_empty() {
return Err(anyhow!("Must attach builder to RunBuild").into());
@@ -117,6 +110,14 @@ impl Resolve<ExecuteArgs> for RunBuild {
update.version = build.config.version;
update_update(update.clone()).await?;
// Add the $VERSION to variables. Use with [[$VERSION]]
if !vars_and_secrets.variables.contains_key("$VERSION") {
vars_and_secrets.variables.insert(
String::from("$VERSION"),
build.config.version.to_string(),
);
}
let git_token = git_token(
&build.config.git_provider,
&build.config.git_account,
@@ -176,6 +177,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
});
// GET BUILDER PERIPHERY
let (periphery, cleanup_data) = match get_builder_periphery(
build.name.clone(),
Some(build.config.version),
@@ -201,8 +203,9 @@ impl Resolve<ExecuteArgs> for RunBuild {
}
};
// INTERPOLATE VARIABLES
// CLONE REPO
let secret_replacers = if !build.config.skip_secret_interp {
// Interpolate variables / secrets into pre build command
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
@@ -213,34 +216,6 @@ impl Resolve<ExecuteArgs> for RunBuild {
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.build_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.secret_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.dockerfile,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut build.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
@@ -252,57 +227,84 @@ impl Resolve<ExecuteArgs> for RunBuild {
Default::default()
};
let commit_message = if !build.config.files_on_host
&& !build.config.repo.is_empty()
{
// CLONE REPO
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&build).into(),
git_token,
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
replacers: Default::default(),
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update, build.id, build.name, true).await
},
};
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
update.logs.extend(res.logs);
update.commit_hash =
res.commit_hash.unwrap_or_default().to_string();
res.commit_message.unwrap_or_default()
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
Default::default()
}
};
update_update(update.clone()).await?;
Some(commit_message)
} else {
None
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&build).into(),
git_token,
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
replacers: secret_replacers.into_iter().collect(),
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_early_return(update, build.id, build.name, true).await
},
};
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
update.logs.extend(res.logs);
update.commit_hash =
res.commit_hash.unwrap_or_default().to_string();
res.commit_message.unwrap_or_default()
}
Err(e) => {
warn!("failed build at clone repo | {e:#}");
update.push_error_log(
"clone repo",
format_serror(&e.context("failed to clone repo").into()),
);
Default::default()
}
};
update_update(update.clone()).await?;
if all_logs_success(&update.logs) {
// RUN BUILD
let secret_replacers = if !build.config.skip_secret_interp {
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.build_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_string(
&vars_and_secrets,
&mut build.config.secret_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut build.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
Default::default()
};
let res = tokio::select! {
res = periphery
.request(api::build::Build {
@@ -319,7 +321,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
_ = cancel.cancelled() => {
info!("build cancelled during build, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
cleanup_builder_instance(cleanup_data, &mut update)
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
return handle_early_return(update, build.id, build.name, true).await
},
@@ -363,9 +365,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
// stop the cancel listening task from going forever
cancel.cancel();
// If building on temporary cloud server (AWS),
// this will terminate the server.
cleanup_builder_instance(cleanup_data, &mut update).await;
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
@@ -514,10 +515,10 @@ impl Resolve<ExecuteArgs> for CancelBuild {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;

View File

@@ -34,7 +34,6 @@ use crate::{
update::update_update,
},
monitor::update_cache_for_server,
permission::get_check_permissions,
resource,
state::action_states,
};
@@ -69,10 +68,10 @@ async fn setup_deployment_execution(
deployment: &str,
user: &User,
) -> anyhow::Result<(Deployment, Server)> {
let deployment = get_check_permissions::<Deployment>(
let deployment = resource::get_check_permissions::<Deployment>(
deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;

View File

@@ -1,9 +1,7 @@
use std::{pin::Pin, time::Instant};
use anyhow::Context;
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
};
use axum::{Extension, Router, middleware, routing::post};
use axum_extra::{TypedHeader, headers::ContentType};
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
@@ -12,7 +10,6 @@ use komodo_client::{
api::execute::*,
entities::{
Operation,
permission::PermissionLevel,
update::{Log, Update},
user::User,
},
@@ -21,7 +18,6 @@ use mungos::by_id::find_one_by_id;
use resolver_api::Resolve;
use response::JsonString;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
@@ -40,11 +36,10 @@ mod deployment;
mod procedure;
mod repo;
mod server;
mod server_template;
mod stack;
mod sync;
use super::Variant;
pub use {
deployment::pull_deployment_inner, stack::pull_stack_inner,
};
@@ -87,21 +82,6 @@ pub enum ExecuteRequest {
PruneBuildx(PruneBuildx),
PruneSystem(PruneSystem),
// ==== STACK ====
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
DeployStackIfChanged(DeployStackIfChanged),
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
PullStack(PullStack),
BatchPullStack(BatchPullStack),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
// ==== DEPLOYMENT ====
Deploy(Deploy),
BatchDeploy(BatchDeploy),
@@ -114,6 +94,20 @@ pub enum ExecuteRequest {
DestroyDeployment(DestroyDeployment),
BatchDestroyDeployment(BatchDestroyDeployment),
// ==== STACK ====
DeployStack(DeployStack),
BatchDeployStack(BatchDeployStack),
DeployStackIfChanged(DeployStackIfChanged),
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
PullStack(PullStack),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
PauseStack(PauseStack),
UnpauseStack(UnpauseStack),
DestroyStack(DestroyStack),
BatchDestroyStack(BatchDestroyStack),
// ==== BUILD ====
RunBuild(RunBuild),
BatchRunBuild(BatchRunBuild),
@@ -136,6 +130,9 @@ pub enum ExecuteRequest {
RunAction(RunAction),
BatchRunAction(BatchRunAction),
// ==== SERVER TEMPLATE ====
LaunchServer(LaunchServer),
// ==== ALERTER ====
TestAlerter(TestAlerter),
@@ -146,22 +143,9 @@ pub enum ExecuteRequest {
pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(auth_request))
}
async fn variant_handler(
user: Extension<User>,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
let req: ExecuteRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
}))?;
handler(user, Json(req)).await
}
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<ExecuteRequest>,
@@ -299,7 +283,6 @@ async fn batch_execute<E: BatchExecute>(
pattern,
Default::default(),
user,
PermissionLevel::Execute.into(),
&[],
)
.await?;

View File

@@ -6,12 +6,8 @@ use komodo_client::{
BatchExecutionResponse, BatchRunProcedure, RunProcedure,
},
entities::{
alert::{Alert, AlertData, SeverityLevel},
komodo_timestamp,
permission::PermissionLevel,
procedure::Procedure,
update::Update,
user::User,
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
@@ -19,10 +15,8 @@ use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::{
alert::send_alerts,
helpers::{procedure::execute_procedure, update::update_update},
permission::get_check_permissions,
resource::refresh_procedure_state_cache,
resource::{self, refresh_procedure_state_cache},
state::{action_states, db_client},
};
@@ -71,10 +65,10 @@ fn resolve_inner(
>,
> {
Box::pin(async move {
let procedure = get_check_permissions::<Procedure>(
let procedure = resource::get_check_permissions::<Procedure>(
&procedure,
&user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -143,26 +137,6 @@ fn resolve_inner(
update_update(update.clone()).await?;
if !update.success && procedure.config.failure_alert {
warn!("procedure unsuccessful, alerting...");
let target = update.target.clone();
tokio::spawn(async move {
let alert = Alert {
id: Default::default(),
target,
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::ProcedureFailed {
id: procedure.id,
name: procedure.name,
},
};
send_alerts(&[alert]).await
});
}
Ok(update)
})
}

View File

@@ -41,7 +41,6 @@ use crate::{
query::get_variables_and_secrets,
update::update_update,
},
permission::get_check_permissions,
resource::{self, refresh_repo_state_cache},
state::{action_states, db_client},
};
@@ -74,10 +73,10 @@ impl Resolve<ExecuteArgs> for CloneRepo {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -186,10 +185,10 @@ impl Resolve<ExecuteArgs> for PullRepo {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -341,10 +340,10 @@ impl Resolve<ExecuteArgs> for BuildRepo {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let mut repo = get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -463,7 +462,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
cleanup_builder_instance(cleanup_data, &mut update)
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
info!("builder cleaned up");
return handle_builder_early_return(update, repo.id, repo.name, true).await
@@ -507,9 +506,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
// stop the cancel listening task from going forever
cancel.cancel();
// If building on temporary cloud server (AWS),
// this will terminate the server.
cleanup_builder_instance(cleanup_data, &mut update).await;
cleanup_builder_instance(periphery, cleanup_data, &mut update)
.await;
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
@@ -652,10 +650,10 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;

View File

@@ -15,7 +15,7 @@ use resolver_api::Resolve;
use crate::{
helpers::{periphery_client, update::update_update},
monitor::update_cache_for_server,
permission::get_check_permissions,
resource,
state::action_states,
};
@@ -27,10 +27,10 @@ impl Resolve<ExecuteArgs> for StartContainer {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -81,10 +81,10 @@ impl Resolve<ExecuteArgs> for RestartContainer {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -137,10 +137,10 @@ impl Resolve<ExecuteArgs> for PauseContainer {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -191,10 +191,10 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -247,10 +247,10 @@ impl Resolve<ExecuteArgs> for StopContainer {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -309,10 +309,10 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
signal,
time,
} = self;
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -365,10 +365,10 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -415,10 +415,10 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -467,10 +467,10 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -517,10 +517,10 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -569,10 +569,10 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -619,10 +619,10 @@ impl Resolve<ExecuteArgs> for PruneContainers {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -675,10 +675,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -726,10 +726,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -780,10 +780,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -828,10 +828,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -880,10 +880,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -931,10 +931,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -983,10 +983,10 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -1035,10 +1035,10 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -1087,10 +1087,10 @@ impl Resolve<ExecuteArgs> for PruneSystem {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;

View File

@@ -0,0 +1,156 @@
use anyhow::{Context, anyhow};
use formatting::format_serror;
use komodo_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
permission::PermissionLevel,
server::PartialServerConfig,
server_template::{ServerTemplate, ServerTemplateConfig},
update::Update,
},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
cloud::{
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
},
helpers::update::update_update,
resource,
state::db_client,
};
use super::ExecuteArgs;
// Launches a new cloud instance (AWS EC2 or Hetzner) from the given
// ServerTemplate, then registers it as a Komodo Server resource.
// Progress and failures are streamed into the `update` via
// `update_update` checkpoints; provider-side launch failures are
// reported on the finalized update and returned as Ok(update),
// not as Err.
impl Resolve<ExecuteArgs> for LaunchServer {
  #[instrument(name = "LaunchServer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
  async fn resolve(
    self,
    ExecuteArgs { user, update }: &ExecuteArgs,
  ) -> serror::Result<Update> {
    // validate name isn't already taken by another server
    if db_client()
      .servers
      .find_one(doc! {
        "name": &self.name
      })
      .await
      .context("failed to query db for servers")?
      .is_some()
    {
      return Err(anyhow!("name is already taken").into());
    }
    // Caller must have Execute permission on the server template.
    let template = resource::get_check_permissions::<ServerTemplate>(
      &self.server_template,
      user,
      PermissionLevel::Execute,
    )
    .await?;
    // Work on a local clone of the update, pushing logs as we go.
    let mut update = update.clone();
    update.push_simple_log(
      "launching server",
      format!("{:#?}", template.config),
    );
    update_update(update.clone()).await?;
    // Launch the instance with the matching cloud provider and build
    // the partial config (address / region) for the new Server.
    let config = match template.config {
      ServerTemplateConfig::Aws(config) => {
        // Copy fields needed after `config` is moved into the launch call.
        let region = config.region.clone();
        let use_https = config.use_https;
        let port = config.port;
        let instance =
          match launch_ec2_instance(&self.name, config).await {
            Ok(instance) => instance,
            Err(e) => {
              // Report the provider error on the update and bail out
              // gracefully (finalized update, Ok return).
              update.push_error_log(
                "launch server",
                format!("failed to launch aws instance\n\n{e:#?}"),
              );
              update.finalize();
              update_update(update.clone()).await?;
              return Ok(update);
            }
          };
        update.push_simple_log(
          "launch server",
          format!(
            "successfully launched server {} on ip {}",
            self.name, instance.ip
          ),
        );
        let protocol = if use_https { "https" } else { "http" };
        PartialServerConfig {
          // Periphery address of the new instance.
          address: format!("{protocol}://{}:{port}", instance.ip)
            .into(),
          region: region.into(),
          ..Default::default()
        }
      }
      ServerTemplateConfig::Hetzner(config) => {
        // Copy fields needed after `config` is moved into the launch call.
        let datacenter = config.datacenter;
        let use_https = config.use_https;
        let port = config.port;
        let server =
          match launch_hetzner_server(&self.name, config).await {
            Ok(server) => server,
            Err(e) => {
              // Same best-effort error handling as the AWS branch.
              update.push_error_log(
                "launch server",
                format!("failed to launch hetzner server\n\n{e:#?}"),
              );
              update.finalize();
              update_update(update.clone()).await?;
              return Ok(update);
            }
          };
        update.push_simple_log(
          "launch server",
          format!(
            "successfully launched server {} on ip {}",
            self.name, server.ip
          ),
        );
        let protocol = if use_https { "https" } else { "http" };
        PartialServerConfig {
          // Periphery address of the new instance.
          address: format!("{protocol}://{}:{port}", server.ip)
            .into(),
          // Hetzner datacenter doubles as the region label.
          region: datacenter.as_ref().to_string().into(),
          ..Default::default()
        }
      }
    };
    // Register the launched instance as a Server resource by
    // delegating to the CreateServer write resolver.
    match (CreateServer {
      name: self.name,
      config,
    })
    .resolve(&WriteArgs { user: user.clone() })
    .await
    {
      Ok(server) => {
        update.push_simple_log(
          "create server",
          format!("created server {} ({})", server.name, server.id),
        );
        // Stash the new server id on the update for consumers.
        update.other_data = server.id;
      }
      Err(e) => {
        update.push_error_log(
          "create server",
          format_serror(
            &e.error.context("failed to create server").into(),
          ),
        );
      }
    };
    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}

View File

@@ -29,7 +29,6 @@ use crate::{
update::{add_update_without_send, update_update},
},
monitor::update_cache_for_server,
permission::get_check_permissions,
resource,
stack::{execute::execute_compose, get_stack_and_server},
state::{action_states, db_client},
@@ -70,7 +69,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
let (mut stack, server) = get_stack_and_server(
&self.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
true,
)
.await?;
@@ -321,10 +320,10 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
self,
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
RefreshStackCache {
@@ -386,29 +385,6 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
}
}
// Adapter letting PullStack be fanned out over many stacks at once
// through the shared batch execution machinery.
impl super::BatchExecute for BatchPullStack {
  // Resource type the batch pattern is matched against.
  type Resource = Stack;
  // Builds the per-stack execute request. `services` is left empty
  // here — presumably meaning "all services"; confirm against
  // pull_stack_inner.
  fn single_request(stack: String) -> ExecuteRequest {
    ExecuteRequest::PullStack(PullStack {
      stack,
      services: Vec::new(),
    })
  }
}
// Resolver entry point: expands `self.pattern` into the matching
// stacks and dispatches one PullStack execution per match, returning
// the aggregated per-stack results.
impl Resolve<ExecuteArgs> for BatchPullStack {
  #[instrument(name = "BatchPullStack", skip(user), fields(user_id = user.id))]
  async fn resolve(
    self,
    ExecuteArgs { user, .. }: &ExecuteArgs,
  ) -> serror::Result<BatchExecutionResponse> {
    // Permission checks happen per-stack inside batch_execute.
    Ok(
      super::batch_execute::<BatchPullStack>(&self.pattern, user)
        .await?,
    )
  }
}
pub async fn pull_stack_inner(
mut stack: Stack,
services: Vec<String>,
@@ -496,7 +472,7 @@ impl Resolve<ExecuteArgs> for PullStack {
let (stack, server) = get_stack_and_server(
&self.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
true,
)
.await?;

View File

@@ -16,6 +16,7 @@ use komodo_client::{
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
update::{Log, Update},
@@ -29,7 +30,7 @@ use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
helpers::{query::get_id_to_tags, update::update_update},
permission::get_check_permissions,
resource,
state::{action_states, db_client},
sync::{
AllResourcesById, ResourceSyncTrait,
@@ -54,11 +55,9 @@ impl Resolve<ExecuteArgs> for RunSync {
resource_type: match_resource_type,
resources: match_resources,
} = self;
let sync = get_check_permissions::<entities::sync::ResourceSync>(
&sync,
user,
PermissionLevel::Execute.into(),
)
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&sync, user, PermissionLevel::Execute)
.await?;
// get the action state for the sync (or insert default).
@@ -143,6 +142,10 @@ impl Resolve<ExecuteArgs> for RunSync {
.servers
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::ServerTemplate => all_resources
.templates
.get(&name_or_id)
.map(|t| t.name.clone()),
ResourceTargetVariant::Stack => all_resources
.stacks
.get(&name_or_id)
@@ -329,6 +332,20 @@ impl Resolve<ExecuteArgs> for RunSync {
} else {
Default::default()
};
let server_template_deltas = if sync.config.include_resources {
get_updates_for_execution::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let resource_sync_deltas = if sync.config.include_resources {
get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
@@ -380,6 +397,7 @@ impl Resolve<ExecuteArgs> for RunSync {
if deploy_cache.is_empty()
&& resource_sync_deltas.no_changes()
&& server_template_deltas.no_changes()
&& server_deltas.no_changes()
&& deployment_deltas.no_changes()
&& stack_deltas.no_changes()
@@ -433,6 +451,11 @@ impl Resolve<ExecuteArgs> for RunSync {
&mut update.logs,
ResourceSync::execute_sync_updates(resource_sync_deltas).await,
);
maybe_extend(
&mut update.logs,
ServerTemplate::execute_sync_updates(server_template_deltas)
.await,
);
maybe_extend(
&mut update.logs,
Server::execute_sync_updates(server_deltas).await,

View File

@@ -1,11 +1,5 @@
pub mod auth;
pub mod execute;
pub mod read;
pub mod terminal;
pub mod user;
pub mod write;
// Deserialization helper for the `{variant}` path parameter,
// used with axum's `Path` extractor to route requests by the
// API request variant name.
#[derive(serde::Deserialize)]
struct Variant {
  // Request type name, e.g. "GetServer".
  variant: String,
}

View File

@@ -12,7 +12,6 @@ use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{action_state_cache, action_states},
};
@@ -25,10 +24,10 @@ impl Resolve<ReadArgs> for GetAction {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Action> {
Ok(
get_check_permissions::<Action>(
resource::get_check_permissions::<Action>(
&self.action,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -46,13 +45,8 @@ impl Resolve<ReadArgs> for ListActions {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Action>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
resource::list_for_user::<Action>(self.query, user, &all_tags)
.await?,
)
}
}
@@ -69,10 +63,7 @@ impl Resolve<ReadArgs> for ListFullActions {
};
Ok(
resource::list_full_for_user::<Action>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -84,10 +75,10 @@ impl Resolve<ReadArgs> for GetActionActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ActionActionState> {
let action = get_check_permissions::<Action>(
let action = resource::get_check_permissions::<Action>(
&self.action,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -108,7 +99,6 @@ impl Resolve<ReadArgs> for GetActionsSummary {
let actions = resource::list_full_for_user::<Action>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -16,7 +16,7 @@ use mungos::{
use resolver_api::Resolve;
use crate::{
config::core_config, permission::get_resource_ids_for_user,
config::core_config, resource::get_resource_ids_for_user,
state::db_client,
};

View File

@@ -11,8 +11,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags, resource, state::db_client,
};
use super::ReadArgs;
@@ -23,10 +22,10 @@ impl Resolve<ReadArgs> for GetAlerter {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Alerter> {
Ok(
get_check_permissions::<Alerter>(
resource::get_check_permissions::<Alerter>(
&self.alerter,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -44,13 +43,8 @@ impl Resolve<ReadArgs> for ListAlerters {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Alerter>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
resource::list_for_user::<Alerter>(self.query, user, &all_tags)
.await?,
)
}
}
@@ -67,10 +61,7 @@ impl Resolve<ReadArgs> for ListFullAlerters {
};
Ok(
resource::list_full_for_user::<Alerter>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)

View File

@@ -22,7 +22,6 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{
action_states, build_state_cache, db_client, github_client,
@@ -37,10 +36,10 @@ impl Resolve<ReadArgs> for GetBuild {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Build> {
Ok(
get_check_permissions::<Build>(
resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -58,13 +57,8 @@ impl Resolve<ReadArgs> for ListBuilds {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Build>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
resource::list_for_user::<Build>(self.query, user, &all_tags)
.await?,
)
}
}
@@ -81,10 +75,7 @@ impl Resolve<ReadArgs> for ListFullBuilds {
};
Ok(
resource::list_full_for_user::<Build>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -96,10 +87,10 @@ impl Resolve<ReadArgs> for GetBuildActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<BuildActionState> {
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -120,7 +111,6 @@ impl Resolve<ReadArgs> for GetBuildsSummary {
let builds = resource::list_full_for_user::<Build>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -228,10 +218,10 @@ impl Resolve<ReadArgs> for ListBuildVersions {
patch,
limit,
} = self;
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&build,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
@@ -284,10 +274,7 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
get_all_tags(None).await?
};
let builds = resource::list_full_for_user::<Build>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -319,10 +306,10 @@ impl Resolve<ReadArgs> for GetBuildWebhookEnabled {
});
};
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;

View File

@@ -11,8 +11,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, permission::get_check_permissions,
resource, state::db_client,
helpers::query::get_all_tags, resource, state::db_client,
};
use super::ReadArgs;
@@ -23,10 +22,10 @@ impl Resolve<ReadArgs> for GetBuilder {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Builder> {
Ok(
get_check_permissions::<Builder>(
resource::get_check_permissions::<Builder>(
&self.builder,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -44,13 +43,8 @@ impl Resolve<ReadArgs> for ListBuilders {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Builder>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
resource::list_for_user::<Builder>(self.query, user, &all_tags)
.await?,
)
}
}
@@ -67,10 +61,7 @@ impl Resolve<ReadArgs> for ListFullBuilders {
};
Ok(
resource::list_full_for_user::<Builder>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)

View File

@@ -8,22 +8,19 @@ use komodo_client::{
Deployment, DeploymentActionState, DeploymentConfig,
DeploymentListItem, DeploymentState,
},
docker::container::{Container, ContainerStats},
docker::container::ContainerStats,
permission::PermissionLevel,
server::{Server, ServerState},
server::Server,
update::Log,
},
};
use periphery_client::api::{self, container::InspectContainer};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
resource,
state::{
action_states, deployment_status_cache, server_status_cache,
},
state::{action_states, deployment_status_cache},
};
use super::ReadArgs;
@@ -34,10 +31,10 @@ impl Resolve<ReadArgs> for GetDeployment {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Deployment> {
Ok(
get_check_permissions::<Deployment>(
resource::get_check_permissions::<Deployment>(
&self.deployment,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -56,10 +53,7 @@ impl Resolve<ReadArgs> for ListDeployments {
};
let only_update_available = self.query.specific.update_available;
let deployments = resource::list_for_user::<Deployment>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?;
let deployments = if only_update_available {
@@ -86,10 +80,7 @@ impl Resolve<ReadArgs> for ListFullDeployments {
};
Ok(
resource::list_full_for_user::<Deployment>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -101,10 +92,10 @@ impl Resolve<ReadArgs> for GetDeploymentContainer {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetDeploymentContainerResponse> {
let deployment = get_check_permissions::<Deployment>(
let deployment = resource::get_check_permissions::<Deployment>(
&self.deployment,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let status = deployment_status_cache()
@@ -135,10 +126,10 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
name,
config: DeploymentConfig { server_id, .. },
..
} = get_check_permissions::<Deployment>(
} = resource::get_check_permissions::<Deployment>(
&deployment,
user,
PermissionLevel::Read.logs(),
PermissionLevel::Read,
)
.await?;
if server_id.is_empty() {
@@ -173,10 +164,10 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
name,
config: DeploymentConfig { server_id, .. },
..
} = get_check_permissions::<Deployment>(
} = resource::get_check_permissions::<Deployment>(
&deployment,
user,
PermissionLevel::Read.logs(),
PermissionLevel::Read,
)
.await?;
if server_id.is_empty() {
@@ -197,50 +188,6 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
}
}
// Fetches the full docker inspect payload for a deployment's
// container by proxying the request to the periphery agent on the
// deployment's attached server.
impl Resolve<ReadArgs> for InspectDeploymentContainer {
  async fn resolve(
    self,
    ReadArgs { user }: &ReadArgs,
  ) -> serror::Result<Container> {
    let InspectDeploymentContainer { deployment } = self;
    // Requires Read permission with the `inspect` specific on the deployment.
    let Deployment {
      name,
      config: DeploymentConfig { server_id, .. },
      ..
    } = get_check_permissions::<Deployment>(
      &deployment,
      user,
      PermissionLevel::Read.inspect(),
    )
    .await?;
    // A deployment may exist without being attached to a server yet.
    if server_id.is_empty() {
      return Err(
        anyhow!(
          "Cannot inspect deployment, not attached to any server"
        )
        .into(),
      );
    }
    let server = resource::get::<Server>(&server_id).await?;
    // Only forward the request when the server is reachable / healthy.
    let cache = server_status_cache()
      .get_or_insert_default(&server.id)
      .await;
    if cache.state != ServerState::Ok {
      return Err(
        anyhow!(
          "Cannot inspect container: server is {:?}",
          cache.state
        )
        .into(),
      );
    }
    // Deployment name doubles as the container name on periphery.
    let res = periphery_client(&server)?
      .request(InspectContainer { name })
      .await?;
    Ok(res)
  }
}
impl Resolve<ReadArgs> for GetDeploymentStats {
async fn resolve(
self,
@@ -250,10 +197,10 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
name,
config: DeploymentConfig { server_id, .. },
..
} = get_check_permissions::<Deployment>(
} = resource::get_check_permissions::<Deployment>(
&self.deployment,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
if server_id.is_empty() {
@@ -275,10 +222,10 @@ impl Resolve<ReadArgs> for GetDeploymentActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<DeploymentActionState> {
let deployment = get_check_permissions::<Deployment>(
let deployment = resource::get_check_permissions::<Deployment>(
&self.deployment,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -299,7 +246,6 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -343,10 +289,7 @@ impl Resolve<ReadArgs> for ListCommonDeploymentExtraArgs {
get_all_tags(None).await?
};
let deployments = resource::list_full_for_user::<Deployment>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;

View File

@@ -1,9 +1,7 @@
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::{Context, anyhow};
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
};
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
api::read::*,
entities::{
@@ -11,7 +9,6 @@ use komodo_client::{
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
permission::PermissionLevel,
repo::Repo,
server::Server,
sync::ResourceSync,
@@ -21,7 +18,6 @@ use komodo_client::{
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
@@ -31,8 +27,6 @@ use crate::{
resource,
};
use super::Variant;
mod action;
mod alert;
mod alerter;
@@ -44,6 +38,7 @@ mod procedure;
mod provider;
mod repo;
mod server;
mod server_template;
mod stack;
mod sync;
mod tag;
@@ -72,7 +67,7 @@ enum ReadRequest {
// ==== USER ====
GetUsername(GetUsername),
GetPermission(GetPermission),
GetPermissionLevel(GetPermissionLevel),
FindUser(FindUser),
ListUsers(ListUsers),
ListApiKeys(ListApiKeys),
@@ -98,6 +93,12 @@ enum ReadRequest {
ListActions(ListActions),
ListFullActions(ListFullActions),
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
GetServerTemplatesSummary(GetServerTemplatesSummary),
ListServerTemplates(ListServerTemplates),
ListFullServerTemplates(ListFullServerTemplates),
// ==== SERVER ====
GetServersSummary(GetServersSummary),
GetServer(GetServer),
@@ -115,33 +116,12 @@ enum ReadRequest {
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
GetDockerContainersSummary(GetDockerContainersSummary),
ListAllDockerContainers(ListAllDockerContainers),
ListDockerContainers(ListDockerContainers),
ListDockerNetworks(ListDockerNetworks),
ListDockerImages(ListDockerImages),
ListDockerVolumes(ListDockerVolumes),
ListComposeProjects(ListComposeProjects),
ListTerminals(ListTerminals),
// ==== SERVER STATS ====
GetSystemInformation(GetSystemInformation),
GetSystemStats(GetSystemStats),
ListSystemProcesses(ListSystemProcesses),
// ==== STACK ====
GetStacksSummary(GetStacksSummary),
GetStack(GetStack),
GetStackActionState(GetStackActionState),
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
GetStackLog(GetStackLog),
SearchStackLog(SearchStackLog),
InspectStackContainer(InspectStackContainer),
ListStacks(ListStacks),
ListFullStacks(ListFullStacks),
ListStackServices(ListStackServices),
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
// ==== DEPLOYMENT ====
GetDeploymentsSummary(GetDeploymentsSummary),
@@ -151,7 +131,6 @@ enum ReadRequest {
GetDeploymentStats(GetDeploymentStats),
GetDeploymentLog(GetDeploymentLog),
SearchDeploymentLog(SearchDeploymentLog),
InspectDeploymentContainer(InspectDeploymentContainer),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
@@ -183,6 +162,19 @@ enum ReadRequest {
ListResourceSyncs(ListResourceSyncs),
ListFullResourceSyncs(ListFullResourceSyncs),
// ==== STACK ====
GetStacksSummary(GetStacksSummary),
GetStack(GetStack),
GetStackActionState(GetStackActionState),
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
GetStackLog(GetStackLog),
SearchStackLog(SearchStackLog),
ListStacks(ListStacks),
ListFullStacks(ListFullStacks),
ListStackServices(ListStackServices),
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
// ==== BUILDER ====
GetBuildersSummary(GetBuildersSummary),
GetBuilder(GetBuilder),
@@ -211,6 +203,11 @@ enum ReadRequest {
ListAlerts(ListAlerts),
GetAlert(GetAlert),
// ==== SERVER STATS ====
GetSystemInformation(GetSystemInformation),
GetSystemStats(GetSystemStats),
ListSystemProcesses(ListSystemProcesses),
// ==== VARIABLE ====
GetVariable(GetVariable),
ListVariables(ListVariables),
@@ -225,22 +222,9 @@ enum ReadRequest {
pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(auth_request))
}
async fn variant_handler(
user: Extension<User>,
Path(Variant { variant }): Path<Variant>,
Json(params): Json<serde_json::Value>,
) -> serror::Result<axum::response::Response> {
let req: ReadRequest = serde_json::from_value(json!({
"type": variant,
"params": params,
}))?;
handler(user, Json(req)).await
}
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
async fn handler(
Extension(user): Extension<User>,
@@ -285,7 +269,6 @@ fn core_info() -> &'static GetCoreInfoResponse {
ui_write_disabled: config.ui_write_disabled,
disable_confirm_dialog: config.disable_confirm_dialog,
disable_non_admin_create: config.disable_non_admin_create,
disable_websocket_reconnect: config.disable_websocket_reconnect,
github_webhook_owners: config
.github_webhook_app
.installations
@@ -399,19 +382,16 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
resource::list_full_for_user::<Build>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[]
),
resource::list_full_for_user::<Repo>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[]
),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[]
),
)?;

View File

@@ -1,7 +1,7 @@
use anyhow::{Context, anyhow};
use komodo_client::{
api::read::{
GetPermission, GetPermissionResponse, ListPermissions,
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
ListPermissionsResponse, ListUserTargetPermissions,
ListUserTargetPermissionsResponse,
},
@@ -35,13 +35,13 @@ impl Resolve<ReadArgs> for ListPermissions {
}
}
impl Resolve<ReadArgs> for GetPermission {
impl Resolve<ReadArgs> for GetPermissionLevel {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetPermissionResponse> {
) -> serror::Result<GetPermissionLevelResponse> {
if user.admin {
return Ok(PermissionLevel::Write.all());
return Ok(PermissionLevel::Write);
}
Ok(get_user_permission_on_target(user, &self.target).await?)
}

View File

@@ -10,7 +10,6 @@ use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{action_states, procedure_state_cache},
};
@@ -23,10 +22,10 @@ impl Resolve<ReadArgs> for GetProcedure {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetProcedureResponse> {
Ok(
get_check_permissions::<Procedure>(
resource::get_check_permissions::<Procedure>(
&self.procedure,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -45,10 +44,7 @@ impl Resolve<ReadArgs> for ListProcedures {
};
Ok(
resource::list_for_user::<Procedure>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -67,10 +63,7 @@ impl Resolve<ReadArgs> for ListFullProcedures {
};
Ok(
resource::list_full_for_user::<Procedure>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -85,7 +78,6 @@ impl Resolve<ReadArgs> for GetProceduresSummary {
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -128,10 +120,10 @@ impl Resolve<ReadArgs> for GetProcedureActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetProcedureActionStateResponse> {
let procedure = get_check_permissions::<Procedure>(
let procedure = resource::get_check_permissions::<Procedure>(
&self.procedure,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()

View File

@@ -12,7 +12,6 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{action_states, github_client, repo_state_cache},
};
@@ -25,10 +24,10 @@ impl Resolve<ReadArgs> for GetRepo {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Repo> {
Ok(
get_check_permissions::<Repo>(
resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -46,13 +45,8 @@ impl Resolve<ReadArgs> for ListRepos {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Repo>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
resource::list_for_user::<Repo>(self.query, user, &all_tags)
.await?,
)
}
}
@@ -69,10 +63,7 @@ impl Resolve<ReadArgs> for ListFullRepos {
};
Ok(
resource::list_full_for_user::<Repo>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -84,10 +75,10 @@ impl Resolve<ReadArgs> for GetRepoActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<RepoActionState> {
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -108,7 +99,6 @@ impl Resolve<ReadArgs> for GetReposSummary {
let repos = resource::list_full_for_user::<Repo>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -170,10 +160,10 @@ impl Resolve<ReadArgs> for GetRepoWebhooksEnabled {
});
};
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;

View File

@@ -14,18 +14,14 @@ use komodo_client::{
ResourceTarget,
deployment::Deployment,
docker::{
container::{
Container, ContainerListItem, ContainerStateStatusEnum,
},
container::{Container, ContainerListItem},
image::{Image, ImageHistoryResponseItem},
network::Network,
volume::Volume,
},
komodo_timestamp,
permission::PermissionLevel,
server::{
Server, ServerActionState, ServerListItem, ServerState,
TerminalInfo,
},
stack::{Stack, StackServiceNames},
stats::{SystemInformation, SystemProcess},
@@ -47,11 +43,7 @@ use resolver_api::Resolve;
use tokio::sync::Mutex;
use crate::{
helpers::{
periphery_client,
query::{get_all_tags, get_system_info},
},
permission::get_check_permissions,
helpers::{periphery_client, query::get_all_tags},
resource,
stack::compose_container_match_regex,
state::{action_states, db_client, server_status_cache},
@@ -67,7 +59,6 @@ impl Resolve<ReadArgs> for GetServersSummary {
let servers = resource::list_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?;
@@ -95,10 +86,10 @@ impl Resolve<ReadArgs> for GetPeripheryVersion {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetPeripheryVersionResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let version = server_status_cache()
@@ -116,10 +107,10 @@ impl Resolve<ReadArgs> for GetServer {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Server> {
Ok(
get_check_permissions::<Server>(
resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -137,13 +128,8 @@ impl Resolve<ReadArgs> for ListServers {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Server>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?,
resource::list_for_user::<Server>(self.query, user, &all_tags)
.await?,
)
}
}
@@ -160,10 +146,7 @@ impl Resolve<ReadArgs> for ListFullServers {
};
Ok(
resource::list_full_for_user::<Server>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -175,10 +158,10 @@ impl Resolve<ReadArgs> for GetServerState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetServerStateResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let status = server_status_cache()
@@ -197,10 +180,10 @@ impl Resolve<ReadArgs> for GetServerActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ServerActionState> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -213,18 +196,46 @@ impl Resolve<ReadArgs> for GetServerActionState {
}
}
// This protects the peripheries from spam requests:
// system information is cached per server id and only refetched
// after SYSTEM_INFO_EXPIRY ms have elapsed.
const SYSTEM_INFO_EXPIRY: u128 = FIFTEEN_SECONDS_MS;

// Maps server id -> (cached system information, expiry unix timestamp in ms).
type SystemInfoCache =
  Mutex<HashMap<String, Arc<(SystemInformation, u128)>>>;

// Accessor for the lazily-initialized global cache.
fn system_info_cache() -> &'static SystemInfoCache {
  static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
    OnceLock::new();
  SYSTEM_INFO_CACHE.get_or_init(Default::default)
}
impl Resolve<ReadArgs> for GetSystemInformation {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<SystemInformation> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
get_system_info(&server).await.map_err(Into::into)
let mut lock = system_info_cache().lock().await;
let res = match lock.get(&server.id) {
Some(cached) if cached.1 > unix_timestamp_ms() => {
cached.0.clone()
}
_ => {
let stats = periphery_client(&server)?
.request(periphery::stats::GetSystemInformation {})
.await?;
lock.insert(
server.id,
(stats.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
.into(),
);
stats
}
};
Ok(res)
}
}
@@ -233,10 +244,10 @@ impl Resolve<ReadArgs> for GetSystemStats {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetSystemStatsResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let status =
@@ -265,10 +276,10 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListSystemProcessesResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.processes(),
PermissionLevel::Read,
)
.await?;
let mut lock = processes_cache().lock().await;
@@ -304,10 +315,10 @@ impl Resolve<ReadArgs> for GetHistoricalServerStats {
granularity,
page,
} = self;
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let granularity =
@@ -352,10 +363,10 @@ impl Resolve<ReadArgs> for ListDockerContainers {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListDockerContainersResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -377,7 +388,6 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
let servers = resource::list_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await?
@@ -403,55 +413,15 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
}
}
impl Resolve<ReadArgs> for GetDockerContainersSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetDockerContainersSummaryResponse> {
let servers = resource::list_full_for_user::<Server>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
.context("failed to get servers from db")?;
let mut res = GetDockerContainersSummaryResponse::default();
for server in servers {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(containers) = &cache.containers {
for container in containers {
res.total += 1;
match container.state {
ContainerStateStatusEnum::Created
| ContainerStateStatusEnum::Paused
| ContainerStateStatusEnum::Exited => res.stopped += 1,
ContainerStateStatusEnum::Running => res.running += 1,
ContainerStateStatusEnum::Empty => res.unknown += 1,
_ => res.unhealthy += 1,
}
}
}
}
Ok(res)
}
}
impl Resolve<ReadArgs> for InspectDockerContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Container> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.inspect(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -488,10 +458,10 @@ impl Resolve<ReadArgs> for GetContainerLog {
tail,
timestamps,
} = self;
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.logs(),
PermissionLevel::Read,
)
.await?;
let res = periphery_client(&server)?
@@ -519,10 +489,10 @@ impl Resolve<ReadArgs> for SearchContainerLog {
invert,
timestamps,
} = self;
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&server,
user,
PermissionLevel::Read.logs(),
PermissionLevel::Read,
)
.await?;
let res = periphery_client(&server)?
@@ -544,10 +514,10 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetResourceMatchingContainerResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
// first check deployments
@@ -605,10 +575,10 @@ impl Resolve<ReadArgs> for ListDockerNetworks {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListDockerNetworksResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -627,10 +597,10 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Network> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -657,10 +627,10 @@ impl Resolve<ReadArgs> for ListDockerImages {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListDockerImagesResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -679,10 +649,10 @@ impl Resolve<ReadArgs> for InspectDockerImage {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Image> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -706,10 +676,10 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Vec<ImageHistoryResponseItem>> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -736,10 +706,10 @@ impl Resolve<ReadArgs> for ListDockerVolumes {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListDockerVolumesResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -758,10 +728,10 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Volume> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -785,10 +755,10 @@ impl Resolve<ReadArgs> for ListComposeProjects {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListComposeProjectsResponse> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
@@ -801,66 +771,3 @@ impl Resolve<ReadArgs> for ListComposeProjects {
}
}
}
#[derive(Default)]
struct TerminalCacheItem {
list: Vec<TerminalInfo>,
ttl: i64,
}
const TERMINAL_CACHE_TIMEOUT: i64 = 30_000;
#[derive(Default)]
struct TerminalCache(
std::sync::Mutex<
HashMap<String, Arc<tokio::sync::Mutex<TerminalCacheItem>>>,
>,
);
impl TerminalCache {
fn get_or_insert(
&self,
server_id: String,
) -> Arc<tokio::sync::Mutex<TerminalCacheItem>> {
if let Some(cached) =
self.0.lock().unwrap().get(&server_id).cloned()
{
return cached;
}
let to_cache =
Arc::new(tokio::sync::Mutex::new(TerminalCacheItem::default()));
self.0.lock().unwrap().insert(server_id, to_cache.clone());
to_cache
}
}
fn terminals_cache() -> &'static TerminalCache {
static TERMINALS: OnceLock<TerminalCache> = OnceLock::new();
TERMINALS.get_or_init(Default::default)
}
impl Resolve<ReadArgs> for ListTerminals {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListTerminalsResponse> {
let server = get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.terminal(),
)
.await?;
let cache = terminals_cache().get_or_insert(server.id.clone());
let mut cache = cache.lock().await;
if self.fresh || komodo_timestamp() > cache.ttl {
cache.list = periphery_client(&server)?
.request(periphery_client::api::terminal::ListTerminals {})
.await
.context("Failed to get fresh terminal list")?;
cache.ttl = komodo_timestamp() + TERMINAL_CACHE_TIMEOUT;
Ok(cache.list.clone())
} else {
Ok(cache.list.clone())
}
}
}

View File

@@ -0,0 +1,97 @@
use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
},
};
use mongo_indexed::Document;
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags, resource, state::db_client,
};
use super::ReadArgs;
impl Resolve<ReadArgs> for GetServerTemplate {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetServerTemplateResponse> {
Ok(
resource::get_check_permissions::<ServerTemplate>(
&self.server_template,
user,
PermissionLevel::Read,
)
.await?,
)
}
}
impl Resolve<ReadArgs> for ListServerTemplates {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListServerTemplatesResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<ServerTemplate>(
self.query, user, &all_tags,
)
.await?,
)
}
}
impl Resolve<ReadArgs> for ListFullServerTemplates {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListFullServerTemplatesResponse> {
let all_tags = if self.query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
Ok(
resource::list_full_for_user::<ServerTemplate>(
self.query, user, &all_tags,
)
.await?,
)
}
}
impl Resolve<ReadArgs> for GetServerTemplatesSummary {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<GetServerTemplatesSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
ServerTemplate,
>(user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
let total = db_client()
.server_templates
.count_documents(query)
.await
.context("failed to count all server template documents")?;
let res = GetServerTemplatesSummaryResponse {
total: total as u32,
};
Ok(res)
}
}

View File

@@ -1,32 +1,25 @@
use std::collections::HashSet;
use anyhow::{Context, anyhow};
use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,
docker::container::Container,
permission::PermissionLevel,
server::{Server, ServerState},
stack::{Stack, StackActionState, StackListItem, StackState},
},
};
use periphery_client::api::{
compose::{GetComposeLog, GetComposeLogSearch},
container::InspectContainer,
use periphery_client::api::compose::{
GetComposeLog, GetComposeLogSearch,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{periphery_client, query::get_all_tags},
permission::get_check_permissions,
resource,
stack::get_stack_and_server,
state::{
action_states, github_client, server_status_cache,
stack_status_cache,
},
state::{action_states, github_client, stack_status_cache},
};
use super::ReadArgs;
@@ -37,10 +30,10 @@ impl Resolve<ReadArgs> for GetStack {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Stack> {
Ok(
get_check_permissions::<Stack>(
resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -52,10 +45,10 @@ impl Resolve<ReadArgs> for ListStackServices {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ListStackServicesResponse> {
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
@@ -82,13 +75,9 @@ impl Resolve<ReadArgs> for GetStackLog {
tail,
timestamps,
} = self;
let (stack, server) = get_stack_and_server(
&stack,
user,
PermissionLevel::Read.logs(),
true,
)
.await?;
let (stack, server) =
get_stack_and_server(&stack, user, PermissionLevel::Read, true)
.await?;
let res = periphery_client(&server)?
.request(GetComposeLog {
project: stack.project_name(false),
@@ -115,13 +104,9 @@ impl Resolve<ReadArgs> for SearchStackLog {
invert,
timestamps,
} = self;
let (stack, server) = get_stack_and_server(
&stack,
user,
PermissionLevel::Read.logs(),
true,
)
.await?;
let (stack, server) =
get_stack_and_server(&stack, user, PermissionLevel::Read, true)
.await?;
let res = periphery_client(&server)?
.request(GetComposeLogSearch {
project: stack.project_name(false),
@@ -137,60 +122,6 @@ impl Resolve<ReadArgs> for SearchStackLog {
}
}
impl Resolve<ReadArgs> for InspectStackContainer {
async fn resolve(
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<Container> {
let InspectStackContainer { stack, service } = self;
let stack = get_check_permissions::<Stack>(
&stack,
user,
PermissionLevel::Read.inspect(),
)
.await?;
if stack.config.server_id.is_empty() {
return Err(
anyhow!("Cannot inspect stack, not attached to any server")
.into(),
);
}
let server =
resource::get::<Server>(&stack.config.server_id).await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(
anyhow!(
"Cannot inspect container: server is {:?}",
cache.state
)
.into(),
);
}
let services = &stack_status_cache()
.get(&stack.id)
.await
.unwrap_or_default()
.curr
.services;
let Some(name) = services
.into_iter()
.find(|s| s.service == service)
.and_then(|s| s.container.as_ref().map(|c| c.name.clone()))
else {
return Err(anyhow!(
"No service found matching '{service}'. Was the stack last deployed manually?"
).into());
};
let res = periphery_client(&server)?
.request(InspectContainer { name })
.await?;
Ok(res)
}
}
impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
async fn resolve(
self,
@@ -202,10 +133,7 @@ impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -236,10 +164,7 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -270,13 +195,9 @@ impl Resolve<ReadArgs> for ListStacks {
get_all_tags(None).await?
};
let only_update_available = self.query.specific.update_available;
let stacks = resource::list_for_user::<Stack>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?;
let stacks =
resource::list_for_user::<Stack>(self.query, user, &all_tags)
.await?;
let stacks = if only_update_available {
stacks
.into_iter()
@@ -307,10 +228,7 @@ impl Resolve<ReadArgs> for ListFullStacks {
};
Ok(
resource::list_full_for_user::<Stack>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -322,10 +240,10 @@ impl Resolve<ReadArgs> for GetStackActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<StackActionState> {
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -346,7 +264,6 @@ impl Resolve<ReadArgs> for GetStacksSummary {
let stacks = resource::list_full_for_user::<Stack>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -385,10 +302,10 @@ impl Resolve<ReadArgs> for GetStackWebhooksEnabled {
});
};
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;

View File

@@ -14,7 +14,6 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
permission::get_check_permissions,
resource,
state::{action_states, github_client},
};
@@ -27,10 +26,10 @@ impl Resolve<ReadArgs> for GetResourceSync {
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ResourceSync> {
Ok(
get_check_permissions::<ResourceSync>(
resource::get_check_permissions::<ResourceSync>(
&self.sync,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?,
)
@@ -49,10 +48,7 @@ impl Resolve<ReadArgs> for ListResourceSyncs {
};
Ok(
resource::list_for_user::<ResourceSync>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -71,10 +67,7 @@ impl Resolve<ReadArgs> for ListFullResourceSyncs {
};
Ok(
resource::list_full_for_user::<ResourceSync>(
self.query,
user,
PermissionLevel::Read.into(),
&all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -86,10 +79,10 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
self,
ReadArgs { user }: &ReadArgs,
) -> serror::Result<ResourceSyncActionState> {
let sync = get_check_permissions::<ResourceSync>(
let sync = resource::get_check_permissions::<ResourceSync>(
&self.sync,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
let action_state = action_states()
@@ -111,7 +104,6 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
resource::list_full_for_user::<ResourceSync>(
Default::default(),
user,
PermissionLevel::Read.into(),
&[],
)
.await
@@ -168,10 +160,10 @@ impl Resolve<ReadArgs> for GetSyncWebhooksEnabled {
});
};
let sync = get_check_permissions::<ResourceSync>(
let sync = resource::get_check_permissions::<ResourceSync>(
&self.sync,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;

View File

@@ -9,7 +9,8 @@ use komodo_client::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment,
permission::PermissionLevel, procedure::Procedure, repo::Repo,
resource::ResourceQuery, server::Server, stack::Stack,
resource::ResourceQuery, server::Server,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, toml::ResourcesToml, user::User,
},
};
@@ -20,14 +21,12 @@ use crate::{
helpers::query::{
get_all_tags, get_id_to_tags, get_user_user_group_ids,
},
permission::get_check_permissions,
resource,
state::db_client,
sync::{
AllResourcesById,
toml::{ToToml, convert_resource},
user_groups::{convert_user_groups, user_group_to_toml},
variables::variable_to_toml,
toml::{TOML_PRETTY_OPTIONS, ToToml, convert_resource},
user_groups::convert_user_groups,
},
};
@@ -47,7 +46,6 @@ async fn get_all_targets(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -58,7 +56,6 @@ async fn get_all_targets(
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -69,7 +66,6 @@ async fn get_all_targets(
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -80,7 +76,6 @@ async fn get_all_targets(
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -91,7 +86,6 @@ async fn get_all_targets(
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -102,7 +96,6 @@ async fn get_all_targets(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -113,7 +106,6 @@ async fn get_all_targets(
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -124,7 +116,6 @@ async fn get_all_targets(
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -135,18 +126,26 @@ async fn get_all_targets(
resource::list_for_user::<Action>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
user,
PermissionLevel::Read.into(),
&all_tags,
)
.await?
@@ -210,10 +209,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
for target in targets {
match target {
ResourceTarget::Alerter(id) => {
let alerter = get_check_permissions::<Alerter>(
let alerter = resource::get_check_permissions::<Alerter>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
res.alerters.push(convert_resource::<Alerter>(
@@ -224,10 +223,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::ResourceSync(id) => {
let sync = get_check_permissions::<ResourceSync>(
let sync = resource::get_check_permissions::<ResourceSync>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
if sync.config.file_contents.is_empty()
@@ -242,11 +241,25 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
}
ResourceTarget::ServerTemplate(id) => {
let template = resource::get_check_permissions::<
ServerTemplate,
>(&id, user, PermissionLevel::Read)
.await?;
res.server_templates.push(
convert_resource::<ServerTemplate>(
template,
false,
vec![],
&id_to_tags,
),
)
}
ResourceTarget::Server(id) => {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
res.servers.push(convert_resource::<Server>(
@@ -257,12 +270,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::Builder(id) => {
let mut builder = get_check_permissions::<Builder>(
&id,
user,
PermissionLevel::Read.into(),
)
.await?;
let mut builder =
resource::get_check_permissions::<Builder>(
&id,
user,
PermissionLevel::Read,
)
.await?;
Builder::replace_ids(&mut builder, &all);
res.builders.push(convert_resource::<Builder>(
builder,
@@ -272,10 +286,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::Build(id) => {
let mut build = get_check_permissions::<Build>(
let mut build = resource::get_check_permissions::<Build>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
Build::replace_ids(&mut build, &all);
@@ -287,10 +301,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::Deployment(id) => {
let mut deployment = get_check_permissions::<Deployment>(
&id,
user,
PermissionLevel::Read.into(),
let mut deployment = resource::get_check_permissions::<
Deployment,
>(
&id, user, PermissionLevel::Read
)
.await?;
Deployment::replace_ids(&mut deployment, &all);
@@ -302,10 +316,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::Repo(id) => {
let mut repo = get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
Repo::replace_ids(&mut repo, &all);
@@ -317,10 +331,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::Stack(id) => {
let mut stack = get_check_permissions::<Stack>(
let mut stack = resource::get_check_permissions::<Stack>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
Stack::replace_ids(&mut stack, &all);
@@ -332,10 +346,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
))
}
ResourceTarget::Procedure(id) => {
let mut procedure = get_check_permissions::<Procedure>(
&id,
user,
PermissionLevel::Read.into(),
let mut procedure = resource::get_check_permissions::<
Procedure,
>(
&id, user, PermissionLevel::Read
)
.await?;
Procedure::replace_ids(&mut procedure, &all);
@@ -347,10 +361,10 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
));
}
ResourceTarget::Action(id) => {
let mut action = get_check_permissions::<Action>(
let mut action = resource::get_check_permissions::<Action>(
&id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
Action::replace_ids(&mut action, &all);
@@ -489,6 +503,14 @@ fn serialize_resources_toml(
Builder::push_to_toml_string(builder, &mut toml)?;
}
for server_template in resources.server_templates {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[server_template]]\n");
ServerTemplate::push_to_toml_string(server_template, &mut toml)?;
}
for resource_sync in resources.resource_syncs {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
@@ -501,14 +523,22 @@ fn serialize_resources_toml(
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str(&variable_to_toml(variable)?);
toml.push_str("[[variable]]\n");
toml.push_str(
&toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS)
.context("failed to serialize variables to toml")?,
);
}
for user_group in resources.user_groups {
for user_group in &resources.user_groups {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str(&user_group_to_toml(user_group)?);
toml.push_str("[[user_group]]\n");
toml.push_str(
&toml_pretty::to_string(user_group, TOML_PRETTY_OPTIONS)
.context("failed to serialize user_groups to toml")?,
);
}
Ok(toml)

View File

@@ -14,6 +14,7 @@ use komodo_client::{
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
update::{Update, UpdateListItem},
@@ -27,11 +28,7 @@ use mungos::{
};
use resolver_api::Resolve;
use crate::{
config::core_config,
permission::{get_check_permissions, get_resource_ids_for_user},
state::db_client,
};
use crate::{config::core_config, resource, state::db_client};
use super::ReadArgs;
@@ -45,17 +42,18 @@ impl Resolve<ReadArgs> for ListUpdates {
let query = if user.admin || core_config().transparent_mode {
self.query
} else {
let server_query = get_resource_ids_for_user::<Server>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let server_query =
resource::get_resource_ids_for_user::<Server>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Server", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
get_resource_ids_for_user::<Deployment>(user)
resource::get_resource_ids_for_user::<Deployment>(user)
.await?
.map(|ids| {
doc! {
@@ -64,35 +62,38 @@ impl Resolve<ReadArgs> for ListUpdates {
})
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query = get_resource_ids_for_user::<Stack>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let stack_query =
resource::get_resource_ids_for_user::<Stack>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Stack", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query = get_resource_ids_for_user::<Build>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let build_query =
resource::get_resource_ids_for_user::<Build>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Build", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query = get_resource_ids_for_user::<Repo>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let repo_query =
resource::get_resource_ids_for_user::<Repo>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Repo", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
get_resource_ids_for_user::<Procedure>(user)
resource::get_resource_ids_for_user::<Procedure>(user)
.await?
.map(|ids| {
doc! {
@@ -101,43 +102,57 @@ impl Resolve<ReadArgs> for ListUpdates {
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query = get_resource_ids_for_user::<Action>(user)
let action_query =
resource::get_resource_ids_for_user::<Action>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query =
resource::get_resource_ids_for_user::<Builder>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query =
resource::get_resource_ids_for_user::<Alerter>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query =
resource::get_resource_ids_for_user::<ServerTemplate>(user)
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let resource_sync_query =
resource::get_resource_ids_for_user::<ResourceSync>(
user,
)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query = get_resource_ids_for_user::<Builder>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Builder", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query = get_resource_ids_for_user::<Alerter>(user)
.await?
.map(|ids| {
doc! {
"target.type": "Alerter", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let resource_sync_query = get_resource_ids_for_user::<
ResourceSync,
>(user)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = self.query.unwrap_or_default();
query.extend(doc! {
@@ -151,6 +166,7 @@ impl Resolve<ReadArgs> for ListUpdates {
action_query,
alerter_query,
builder_query,
server_template_query,
resource_sync_query,
]
});
@@ -229,82 +245,90 @@ impl Resolve<ReadArgs> for GetUpdate {
);
}
ResourceTarget::Server(id) => {
get_check_permissions::<Server>(
resource::get_check_permissions::<Server>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Deployment(id) => {
get_check_permissions::<Deployment>(
resource::get_check_permissions::<Deployment>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Build(id) => {
get_check_permissions::<Build>(
resource::get_check_permissions::<Build>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Repo(id) => {
get_check_permissions::<Repo>(
resource::get_check_permissions::<Repo>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Builder(id) => {
get_check_permissions::<Builder>(
resource::get_check_permissions::<Builder>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Alerter(id) => {
get_check_permissions::<Alerter>(
resource::get_check_permissions::<Alerter>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Procedure(id) => {
get_check_permissions::<Procedure>(
resource::get_check_permissions::<Procedure>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Action(id) => {
get_check_permissions::<Action>(
resource::get_check_permissions::<Action>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::ServerTemplate(id) => {
resource::get_check_permissions::<ServerTemplate>(
id,
user,
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
get_check_permissions::<ResourceSync>(
resource::get_check_permissions::<ResourceSync>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::Stack(id) => {
get_check_permissions::<Stack>(
resource::get_check_permissions::<Stack>(
id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Read,
)
.await?;
}

View File

@@ -1,76 +0,0 @@
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
api::terminal::ExecuteTerminalBody,
entities::{
permission::PermissionLevel, server::Server, user::User,
},
};
use serror::Json;
use uuid::Uuid;
use crate::{
auth::auth_request, helpers::periphery_client,
permission::get_check_permissions,
};
pub fn router() -> Router {
Router::new()
.route("/execute", post(execute))
.layer(middleware::from_fn(auth_request))
}
async fn execute(
Extension(user): Extension<User>,
Json(request): Json<ExecuteTerminalBody>,
) -> serror::Result<axum::body::Body> {
execute_inner(Uuid::new_v4(), request, user).await
}
/// Core of the terminal execute flow: authorize the user on the target
/// server, forward the command to the server's periphery agent, and
/// stream the command output back as the HTTP response body.
/// `req_id` exists only to correlate error logs with a specific request.
#[instrument(
  name = "ExecuteTerminal",
  skip(user),
  fields(
    user_id = user.id,
  )
)]
async fn execute_inner(
  req_id: Uuid,
  ExecuteTerminalBody {
    server,
    terminal,
    command,
  }: ExecuteTerminalBody,
  user: User,
) -> serror::Result<axum::body::Body> {
  info!("/terminal request | user: {}", user.username);
  // Run all fallible steps inside one async block so any error can be
  // logged with the request id before being converted and returned.
  let res = async {
    // Permission check: Read level with the terminal-specific flag
    // (`PermissionLevel::Read.terminal()`) on the target server.
    let server = get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read.terminal(),
    )
    .await?;
    let periphery = periphery_client(&server)?;
    let stream = periphery
      .execute_terminal(terminal, command)
      .await
      .context("Failed to execute command on periphery")?;
    anyhow::Ok(stream)
  }
  .await;
  let stream = match res {
    Ok(stream) => stream,
    Err(e) => {
      // Log with req_id so the failure can be traced in server logs.
      warn!("/terminal request {req_id} error: {e:#}");
      return Err(e.into());
    }
  };
  // Stream the periphery output line-by-line as the response body.
  Ok(axum::body::Body::from_stream(stream.into_line_stream()))
}

View File

@@ -1,9 +1,7 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{Context, anyhow};
use axum::{
Extension, Json, Router, extract::Path, middleware, routing::post,
};
use axum::{Extension, Json, Router, middleware, routing::post};
use derive_variants::EnumVariants;
use komodo_client::{
api::user::*,
@@ -14,7 +12,6 @@ use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use typeshare::typeshare;
use uuid::Uuid;
@@ -24,8 +21,6 @@ use crate::{
state::db_client,
};
use super::Variant;
pub struct UserArgs {
pub user: User,
}
@@ -48,22 +43,9 @@ enum UserRequest {
pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(auth_request))
}
/// Handle `POST /{variant}` by reshaping the path variant plus the raw
/// JSON params into the tagged-enum form `UserRequest` deserializes
/// from, then forwarding to the main `handler`.
async fn variant_handler(
  user: Extension<User>,
  Path(Variant { variant }): Path<Variant>,
  Json(params): Json<serde_json::Value>,
) -> serror::Result<axum::response::Response> {
  let value = json!({
    "type": variant,
    "params": params,
  });
  let request: UserRequest = serde_json::from_value(value)?;
  handler(user, Json(request)).await
}
#[instrument(name = "UserHandler", level = "debug", skip(user))]
async fn handler(
Extension(user): Extension<User>,

View File

@@ -6,7 +6,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
use crate::resource;
use super::WriteArgs;
@@ -29,12 +29,13 @@ impl Resolve<WriteArgs> for CopyAction {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Action> {
let Action { config, .. } = get_check_permissions::<Action>(
&self.id,
user,
PermissionLevel::Write.into(),
)
.await?;
let Action { config, .. } =
resource::get_check_permissions::<Action>(
&self.id,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Action>(&self.name, config.into(), user)
.await?,

View File

@@ -6,7 +6,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
use crate::resource;
use super::WriteArgs;
@@ -29,12 +29,13 @@ impl Resolve<WriteArgs> for CopyAlerter {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Alerter> {
let Alerter { config, .. } = get_check_permissions::<Alerter>(
&self.id,
user,
PermissionLevel::Write.into(),
)
.await?;
let Alerter { config, .. } =
resource::get_check_permissions::<Alerter>(
&self.id,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Alerter>(&self.name, config.into(), user)
.await?,

View File

@@ -1,17 +1,12 @@
use std::{path::PathBuf, str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, FileContents, NoData, Operation, all_logs_success,
CloneArgs, NoData,
build::{Build, BuildInfo, PartialBuildConfig},
builder::{Builder, BuilderConfig},
config::core::CoreConfig,
permission::PermissionLevel,
server::ServerState,
update::Update,
},
};
@@ -20,23 +15,11 @@ use mungos::mongodb::bson::to_document;
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::{
PeripheryClient,
api::build::{
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
},
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
config::core_config,
helpers::{
git_token, periphery_client,
query::get_server_with_state,
update::{add_update, make_update},
},
permission::get_check_permissions,
helpers::git_token,
resource,
state::{db_client, github_client},
};
@@ -62,12 +45,13 @@ impl Resolve<WriteArgs> for CopyBuild {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Build> {
let Build { mut config, .. } = get_check_permissions::<Build>(
&self.id,
user,
PermissionLevel::Read.into(),
)
.await?;
let Build { mut config, .. } =
resource::get_check_permissions::<Build>(
&self.id,
user,
PermissionLevel::Write,
)
.await?;
// reset version to 0.0.0
config.version = Default::default();
Ok(
@@ -104,184 +88,6 @@ impl Resolve<WriteArgs> for RenameBuild {
}
}
impl Resolve<WriteArgs> for WriteBuildFileContents {
  /// Write new Dockerfile contents for a Build, either directly onto the
  /// builder host ("files on host" mode) or by committing to the build's
  /// git repo. Returns the Update recording each step's logs.
  #[instrument(name = "WriteBuildFileContents", skip(args))]
  async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
    // Caller needs Write permission on the target build.
    let build = get_check_permissions::<Build>(
      &self.build,
      &args.user,
      PermissionLevel::Write.into(),
    )
    .await?;
    // Only builds backed by host files or a git repo have a Dockerfile
    // to write; UI-configured builds are rejected up front.
    if !build.config.files_on_host && build.config.repo.is_empty() {
      return Err(anyhow!(
        "Build is not configured to use Files on Host or Git Repo, can't write dockerfile contents"
      ).into());
    }
    let mut update =
      make_update(&build, Operation::WriteDockerfile, &args.user);
    // Record the incoming contents in the update log for auditability.
    update.push_simple_log("Dockerfile to write", &self.contents);
    if build.config.files_on_host {
      // Files-on-host: send the contents straight to the builder's
      // periphery agent. Errors are captured as logs, not returned.
      match get_on_host_periphery(&build)
        .await?
        .request(WriteDockerfileContentsToHost {
          name: build.name,
          build_path: build.config.build_path,
          dockerfile_path: build.config.dockerfile_path,
          contents: self.contents,
        })
        .await
        .context("Failed to write dockerfile contents to host")
      {
        Ok(log) => {
          update.logs.push(log);
        }
        Err(e) => {
          update.push_error_log(
            "Write Dockerfile Contents",
            format_serror(&e.into()),
          );
        }
      };
      // Bail out early (persisting the update) if the write failed.
      if !all_logs_success(&update.logs) {
        update.finalize();
        update.id = add_update(update.clone()).await?;
        return Ok(update);
      }
      // Refresh the cached remote contents; failure here is logged
      // but does not fail the overall operation.
      if let Err(e) =
        (RefreshBuildCache { build: build.id }).resolve(args).await
      {
        update.push_error_log(
          "Refresh build cache",
          format_serror(&e.error.into()),
        );
      }
      update.finalize();
      update.id = add_update(update.clone()).await?;
      Ok(update)
    } else {
      // Repo-backed build: write + commit via the git path.
      write_dockerfile_contents_git(self, args, build, update).await
    }
  }
}
/// Write the Dockerfile into the build's local repo clone and commit it
/// to the configured branch. Each step (init, write, commit, cache
/// refresh) appends to `update.logs`; on step failure the update is
/// finalized and returned early rather than erroring out.
async fn write_dockerfile_contents_git(
  req: WriteBuildFileContents,
  args: &WriteArgs,
  build: Build,
  mut update: Update,
) -> serror::Result<Update> {
  let WriteBuildFileContents { build: _, contents } = req;
  let mut clone_args: CloneArgs = (&build).into();
  // Root of this build's dedicated clone under the core repo directory.
  let root = clone_args.unique_path(&core_config().repo_directory)?;
  let build_path = build
    .config
    .build_path
    .parse::<PathBuf>()
    .context("Invalid build path")?;
  let dockerfile_path = build
    .config
    .dockerfile_path
    .parse::<PathBuf>()
    .context("Invalid dockerfile path")?;
  // Final on-disk location: <root>/<build_path>/<dockerfile_path>.
  let full_path = root.join(&build_path).join(&dockerfile_path);
  if let Some(parent) = full_path.parent() {
    fs::create_dir_all(parent).await.with_context(|| {
      format!(
        "Failed to initialize dockerfile parent directory {parent:?}"
      )
    })?;
  }
  // Ensure the folder is initialized as git repo.
  // This allows a new file to be committed on a branch that may not exist.
  if !root.join(".git").exists() {
    // Resolve a git access token when the clone args name an account;
    // the callback lets the token lookup flip clone_args to https.
    let access_token = if let Some(account) = &clone_args.account {
      git_token(&clone_args.provider, account, |https| clone_args.https = https)
        .await
        .with_context(
          || format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
        )?
    } else {
      None
    };
    git::init_folder_as_repo(
      &root,
      &clone_args,
      access_token.as_deref(),
      &mut update.logs,
    )
    .await;
    // If init logged any failure, persist the update and stop here.
    if !all_logs_success(&update.logs) {
      update.finalize();
      update.id = add_update(update.clone()).await?;
      return Ok(update);
    }
  }
  // Write the file; success or failure is recorded as a log entry.
  if let Err(e) =
    fs::write(&full_path, &contents).await.with_context(|| {
      format!("Failed to write dockerfile contents to {full_path:?}")
    })
  {
    update
      .push_error_log("Write Dockerfile", format_serror(&e.into()));
  } else {
    update.push_simple_log(
      "Write Dockerfile",
      format!("File written to {full_path:?}"),
    );
  };
  if !all_logs_success(&update.logs) {
    update.finalize();
    update.id = add_update(update.clone()).await?;
    return Ok(update);
  }
  // Commit the file to the build's branch, attributing the commit
  // message to the requesting user.
  let commit_res = git::commit_file(
    &format!("{}: Commit Dockerfile", args.user.username),
    &root,
    &build_path.join(&dockerfile_path),
    &build.config.branch,
  )
  .await;
  update.logs.extend(commit_res.logs);
  // Best-effort cache refresh; failure is logged, not propagated.
  if let Err(e) = (RefreshBuildCache { build: build.name })
    .resolve(args)
    .await
  {
    update.push_error_log(
      "Refresh build cache",
      format_serror(&e.error.into()),
    );
  }
  update.finalize();
  update.id = add_update(update.clone()).await?;
  Ok(update)
}
impl Resolve<WriteArgs> for RefreshBuildCache {
#[instrument(
name = "RefreshBuildCache",
@@ -294,111 +100,62 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
) -> serror::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// build should be able to do this.
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
let (
remote_path,
remote_contents,
remote_error,
latest_hash,
latest_message,
) = if build.config.files_on_host {
// =============
// FILES ON HOST
// =============
match get_on_host_dockerfile(&build).await {
Ok(FileContents { path, contents }) => {
(Some(path), Some(contents), None, None, None)
}
Err(e) => {
(None, None, Some(format_serror(&e.into())), None, None)
}
}
} else if !build.config.repo.is_empty() {
// ================
// REPO BASED BUILD
// ================
if build.config.git_provider.is_empty() {
// Nothing to do here
return Ok(NoData {});
}
let config = core_config();
if build.config.repo.is_empty()
|| build.config.git_provider.is_empty()
{
// Nothing to do here
return Ok(NoData {});
}
let mut clone_args: CloneArgs = (&build).into();
let repo_path =
clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(repo_path.display().to_string());
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
let config = core_config();
let access_token = if let Some(username) = &clone_args.account {
git_token(&clone_args.provider, username, |https| {
let mut clone_args: CloneArgs = (&build).into();
let repo_path =
clone_args.unique_path(&core_config().repo_directory)?;
clone_args.destination = Some(repo_path.display().to_string());
// Don't want to run these on core.
clone_args.on_clone = None;
clone_args.on_pull = None;
let access_token = if let Some(username) = &clone_args.account {
git_token(&clone_args.provider, username, |https| {
clone_args.https = https
})
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
)?
} else {
None
};
let GitRes { hash, message, .. } = git::pull_or_clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;
let relative_path = PathBuf::from_str(&build.config.build_path)
.context("Invalid build path")?
.join(&build.config.dockerfile_path);
let full_path = repo_path.join(&relative_path);
let (contents, error) = match fs::read_to_string(&full_path)
.await
.with_context(|| {
format!(
"Failed to read dockerfile contents at {full_path:?}"
)
}) {
Ok(contents) => (Some(contents), None),
Err(e) => (None, Some(format_serror(&e.into()))),
};
(
Some(relative_path.display().to_string()),
contents,
error,
hash,
message,
)
} else {
// =============
// UI BASED FILE
// =============
(None, None, None, None, None)
None
};
let GitRes {
hash: latest_hash,
message: latest_message,
..
} = git::pull_or_clone(
clone_args,
&config.repo_directory,
access_token,
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;
let info = BuildInfo {
last_built_at: build.info.last_built_at,
built_hash: build.info.built_hash,
built_message: build.info.built_message,
built_contents: build.info.built_contents,
remote_path,
remote_contents,
remote_error,
latest_hash,
latest_message,
};
@@ -419,63 +176,6 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
}
}
/// Resolve a periphery client for the host holding the build's files,
/// based on the build's attached builder. Errors when no builder is
/// attached, the builder is AWS-based (no persistent host), or the
/// builder's server is unavailable.
async fn get_on_host_periphery(
  build: &Build,
) -> anyhow::Result<PeripheryClient> {
  if build.config.builder_id.is_empty() {
    return Err(anyhow!("No builder associated with build"));
  }
  let builder = resource::get::<Builder>(&build.config.builder_id)
    .await
    .context("Failed to get builder")?;
  match builder.config {
    // AWS builders are ephemeral — there is no host to read files from.
    BuilderConfig::Aws(_) => {
      Err(anyhow!("Files on host doesn't work with AWS builder"))
    }
    // Url builder: connect directly to the configured address and
    // verify it responds before handing back the client.
    BuilderConfig::Url(config) => {
      let periphery = PeripheryClient::new(
        config.address,
        config.passkey,
        Duration::from_secs(3),
      );
      periphery.health_check().await?;
      Ok(periphery)
    }
    // Server builder: resolve the attached server and require it to
    // be in Ok state before building the client.
    BuilderConfig::Server(config) => {
      if config.server_id.is_empty() {
        return Err(anyhow!(
          "Builder is type server, but has no server attached"
        ));
      }
      let (server, state) =
        get_server_with_state(&config.server_id).await?;
      if state != ServerState::Ok {
        return Err(anyhow!(
          "Builder server is disabled or not reachable"
        ));
      };
      periphery_client(&server)
    }
  }
}
/// Fetch the build's current Dockerfile contents from the builder host.
/// Callers record the Ok case as Some(remote_contents) and the Err case
/// as Some(remote_error).
async fn get_on_host_dockerfile(
  build: &Build,
) -> anyhow::Result<FileContents> {
  let periphery = get_on_host_periphery(build).await?;
  let request = GetDockerfileContentsOnHost {
    name: build.name.clone(),
    build_path: build.config.build_path.clone(),
    dockerfile_path: build.config.dockerfile_path.clone(),
  };
  periphery.request(request).await
}
impl Resolve<WriteArgs> for CreateBuildWebhook {
#[instrument(name = "CreateBuildWebhook", skip(args))]
async fn resolve(
@@ -493,10 +193,10 @@ impl Resolve<WriteArgs> for CreateBuildWebhook {
let WriteArgs { user } = args;
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -606,10 +306,10 @@ impl Resolve<WriteArgs> for DeleteBuildWebhook {
);
};
let build = get_check_permissions::<Build>(
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;

View File

@@ -6,7 +6,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
use crate::resource;
use super::WriteArgs;
@@ -29,12 +29,13 @@ impl Resolve<WriteArgs> for CopyBuilder {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Builder> {
let Builder { config, .. } = get_check_permissions::<Builder>(
&self.id,
user,
PermissionLevel::Write.into(),
)
.await?;
let Builder { config, .. } =
resource::get_check_permissions::<Builder>(
&self.id,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Builder>(&self.name, config.into(), user)
.await?,

View File

@@ -11,7 +11,7 @@ use komodo_client::{
komodo_timestamp,
permission::PermissionLevel,
server::{Server, ServerState},
to_docker_compatible_name,
to_komodo_name,
update::Update,
},
};
@@ -25,7 +25,6 @@ use crate::{
query::get_deployment_state,
update::{add_update, make_update},
},
permission::get_check_permissions,
resource,
state::{action_states, db_client, server_status_cache},
};
@@ -52,10 +51,10 @@ impl Resolve<WriteArgs> for CopyDeployment {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
let Deployment { config, .. } =
get_check_permissions::<Deployment>(
resource::get_check_permissions::<Deployment>(
&self.id,
user,
PermissionLevel::Read.into(),
PermissionLevel::Write,
)
.await?;
Ok(
@@ -71,10 +70,10 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Read.inspect().attach(),
PermissionLevel::Write,
)
.await?;
let cache = server_status_cache()
@@ -189,10 +188,10 @@ impl Resolve<WriteArgs> for RenameDeployment {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Update> {
let deployment = get_check_permissions::<Deployment>(
let deployment = resource::get_check_permissions::<Deployment>(
&self.id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -207,7 +206,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
let _action_guard =
action_state.update(|state| state.renaming = true)?;
let name = to_docker_compatible_name(&self.name);
let name = to_komodo_name(&self.name);
let container_state = get_deployment_state(&deployment).await?;

View File

@@ -4,7 +4,8 @@ use komodo_client::{
entities::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
repo::Repo, server::Server, server_template::ServerTemplate,
stack::Stack, sync::ResourceSync,
},
};
use resolver_api::Resolve;
@@ -92,6 +93,14 @@ impl Resolve<WriteArgs> for UpdateDescription {
)
.await?;
}
ResourceTarget::ServerTemplate(id) => {
resource::update_description::<ServerTemplate>(
&id,
&self.description,
user,
)
.await?;
}
ResourceTarget::ResourceSync(id) => {
resource::update_description::<ResourceSync>(
&id,

View File

@@ -1,23 +1,18 @@
use std::time::Instant;
use anyhow::Context;
use axum::{
Extension, Router, extract::Path, middleware, routing::post,
};
use axum::{Extension, Router, middleware, routing::post};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::write::*, entities::user::User};
use resolver_api::Resolve;
use response::Response;
use serde::{Deserialize, Serialize};
use serde_json::json;
use serror::Json;
use typeshare::typeshare;
use uuid::Uuid;
use crate::auth::auth_request;
use super::Variant;
mod action;
mod alerter;
mod build;
@@ -29,6 +24,7 @@ mod procedure;
mod provider;
mod repo;
mod server;
mod server_template;
mod service_user;
mod stack;
mod sync;
@@ -69,7 +65,6 @@ pub enum WriteRequest {
AddUserToUserGroup(AddUserToUserGroup),
RemoveUserFromUserGroup(RemoveUserFromUserGroup),
SetUsersInUserGroup(SetUsersInUserGroup),
SetEveryoneUserGroup(SetEveryoneUserGroup),
// ==== PERMISSIONS ====
UpdateUserAdmin(UpdateUserAdmin),
@@ -86,20 +81,6 @@ pub enum WriteRequest {
UpdateServer(UpdateServer),
RenameServer(RenameServer),
CreateNetwork(CreateNetwork),
CreateTerminal(CreateTerminal),
DeleteTerminal(DeleteTerminal),
DeleteAllTerminals(DeleteAllTerminals),
// ==== STACK ====
CreateStack(CreateStack),
CopyStack(CopyStack),
DeleteStack(DeleteStack),
UpdateStack(UpdateStack),
RenameStack(RenameStack),
WriteStackFileContents(WriteStackFileContents),
RefreshStackCache(RefreshStackCache),
CreateStackWebhook(CreateStackWebhook),
DeleteStackWebhook(DeleteStackWebhook),
// ==== DEPLOYMENT ====
CreateDeployment(CreateDeployment),
@@ -115,7 +96,6 @@ pub enum WriteRequest {
DeleteBuild(DeleteBuild),
UpdateBuild(UpdateBuild),
RenameBuild(RenameBuild),
WriteBuildFileContents(WriteBuildFileContents),
RefreshBuildCache(RefreshBuildCache),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
@@ -127,6 +107,13 @@ pub enum WriteRequest {
UpdateBuilder(UpdateBuilder),
RenameBuilder(RenameBuilder),
// ==== SERVER TEMPLATE ====
CreateServerTemplate(CreateServerTemplate),
CopyServerTemplate(CopyServerTemplate),
DeleteServerTemplate(DeleteServerTemplate),
UpdateServerTemplate(UpdateServerTemplate),
RenameServerTemplate(RenameServerTemplate),
// ==== REPO ====
CreateRepo(CreateRepo),
CopyRepo(CopyRepo),
@@ -170,6 +157,17 @@ pub enum WriteRequest {
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),
// ==== STACK ====
CreateStack(CreateStack),
CopyStack(CopyStack),
DeleteStack(DeleteStack),
UpdateStack(UpdateStack),
RenameStack(RenameStack),
WriteStackFileContents(WriteStackFileContents),
RefreshStackCache(RefreshStackCache),
CreateStackWebhook(CreateStackWebhook),
DeleteStackWebhook(DeleteStackWebhook),
// ==== TAG ====
CreateTag(CreateTag),
DeleteTag(DeleteTag),
@@ -196,22 +194,9 @@ pub enum WriteRequest {
pub fn router() -> Router {
Router::new()
.route("/", post(handler))
.route("/{variant}", post(variant_handler))
.layer(middleware::from_fn(auth_request))
}
/// Handle `POST /{variant}` by reshaping the path variant plus the raw
/// JSON params into the tagged-enum form `WriteRequest` deserializes
/// from, then forwarding to the main `handler`.
async fn variant_handler(
  user: Extension<User>,
  Path(Variant { variant }): Path<Variant>,
  Json(params): Json<serde_json::Value>,
) -> serror::Result<axum::response::Response> {
  let value = json!({
    "type": variant,
    "params": params,
  });
  let request: WriteRequest = serde_json::from_value(value)?;
  handler(user, Json(request)).await
}
async fn handler(
Extension(user): Extension<User>,
Json(request): Json<WriteRequest>,
@@ -222,6 +207,10 @@ async fn handler(
.await
.context("failure in spawned task");
if let Err(e) = &res {
warn!("/write request {req_id} spawn error: {e:#}");
}
res?
}

View File

@@ -11,7 +11,7 @@ use komodo_client::{
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
mongodb::{
bson::{Document, doc, oid::ObjectId, to_bson},
bson::{Document, doc, oid::ObjectId},
options::UpdateOptions,
},
};
@@ -65,10 +65,6 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<UpdateUserBasePermissionsResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
let UpdateUserBasePermissions {
user_id,
enabled,
@@ -76,6 +72,10 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
create_builds,
} = self;
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
let user = find_one_by_id(&db_client().users, &user_id)
.await
.context("failed to query mongo for user")?
@@ -122,16 +122,16 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<UpdatePermissionOnResourceTypeResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
let Self {
let UpdatePermissionOnResourceType {
user_target,
resource_type,
permission,
} = self;
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
// Some extra checks if user target is an actual User
if let UserTarget::User(user_id) = &user_target {
let user = get_user(user_id).await?;
@@ -153,11 +153,9 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
let id = ObjectId::from_str(&user_target_id)
.context("id is not ObjectId")?;
let filter = doc! { "_id": id };
let field = format!("all.{resource_type}");
let set =
to_bson(&permission).context("permission is not Bson")?;
let update = doc! { "$set": { &field: &set } };
let filter = doc! { "_id": id };
let update = doc! { "$set": { &field: permission.as_ref() } };
match user_target_variant {
UserTargetVariant::User => {
@@ -166,7 +164,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {set} on db")
format!("failed to set {field}: {permission} on db")
})?;
}
UserTargetVariant::UserGroup => {
@@ -175,7 +173,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
.update_one(filter, update)
.await
.with_context(|| {
format!("failed to set {field}: {set} on db")
format!("failed to set {field}: {permission} on db")
})?;
}
}
@@ -190,22 +188,19 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<UpdatePermissionOnTargetResponse> {
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
let UpdatePermissionOnTarget {
user_target,
resource_target,
permission,
} = self;
// Some extra checks relevant if user target is an actual User
if !admin.admin {
return Err(anyhow!("this method is admin only").into());
}
// Some extra checks if user target is an actual User
if let UserTarget::User(user_id) = &user_target {
let user = get_user(user_id).await?;
if !user.enabled {
return Err(anyhow!("user not enabled").into());
}
if user.admin {
return Err(
anyhow!(
@@ -214,6 +209,9 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
.into(),
);
}
if !user.enabled {
return Err(anyhow!("user not enabled").into());
}
}
let (user_target_variant, user_target_id) =
@@ -225,9 +223,6 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
let (user_target_variant, resource_variant) =
(user_target_variant.as_ref(), resource_variant.as_ref());
let specific = to_bson(&permission.specific)
.context("permission.specific is not valid Bson")?;
db_client()
.permissions
.update_one(
@@ -243,8 +238,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
"user_target.id": user_target_id,
"resource_target.type": resource_variant,
"resource_target.id": resource_id,
"level": permission.level.as_ref(),
"specific": specific
"level": permission.as_ref(),
}
},
)
@@ -412,6 +406,20 @@ async fn extract_resource_target_with_validation(
.id;
Ok((ResourceTargetVariant::Action, id))
}
ResourceTarget::ServerTemplate(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.server_templates
.find_one(filter)
.await
.context("failed to query db for server templates")?
.context("no matching server template found")?
.id;
Ok((ResourceTargetVariant::ServerTemplate, id))
}
ResourceTarget::ResourceSync(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },

View File

@@ -6,7 +6,7 @@ use komodo_client::{
};
use resolver_api::Resolve;
use crate::{permission::get_check_permissions, resource};
use crate::resource;
use super::WriteArgs;
@@ -30,10 +30,10 @@ impl Resolve<WriteArgs> for CopyProcedure {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<CopyProcedureResponse> {
let Procedure { config, .. } =
get_check_permissions::<Procedure>(
resource::get_check_permissions::<Procedure>(
&self.id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
Ok(

View File

@@ -10,7 +10,7 @@ use komodo_client::{
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo, RepoInfo},
server::Server,
to_path_compatible_name,
to_komodo_name,
update::{Log, Update},
},
};
@@ -28,7 +28,6 @@ use crate::{
git_token, periphery_client,
update::{add_update, make_update},
},
permission::get_check_permissions,
resource,
state::{action_states, db_client, github_client},
};
@@ -51,12 +50,13 @@ impl Resolve<WriteArgs> for CopyRepo {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Repo> {
let Repo { config, .. } = get_check_permissions::<Repo>(
&self.id,
user,
PermissionLevel::Read.into(),
)
.await?;
let Repo { config, .. } =
resource::get_check_permissions::<Repo>(
&self.id,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Repo>(&self.name, config.into(), user)
.await?,
@@ -87,10 +87,10 @@ impl Resolve<WriteArgs> for RenameRepo {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Update> {
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -111,7 +111,7 @@ impl Resolve<WriteArgs> for RenameRepo {
let _action_guard =
action_state.update(|state| state.renaming = true)?;
let name = to_path_compatible_name(&self.name);
let name = to_komodo_name(&self.name);
let mut update = make_update(&repo, Operation::RenameRepo, user);
@@ -131,7 +131,7 @@ impl Resolve<WriteArgs> for RenameRepo {
let log = match periphery_client(&server)?
.request(api::git::RenameRepo {
curr_name: to_path_compatible_name(&repo.name),
curr_name: to_komodo_name(&repo.name),
new_name: name.clone(),
})
.await
@@ -169,10 +169,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
) -> serror::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// repo should be able to do this.
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -257,10 +257,10 @@ impl Resolve<WriteArgs> for CreateRepoWebhook {
);
};
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
&args.user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -380,10 +380,10 @@ impl Resolve<WriteArgs> for DeleteRepoWebhook {
);
};
let repo = get_check_permissions::<Repo>(
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;

View File

@@ -1,12 +1,10 @@
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
NoData, Operation,
Operation,
permission::PermissionLevel,
server::Server,
to_docker_compatible_name,
update::{Update, UpdateStatus},
},
};
@@ -18,7 +16,6 @@ use crate::{
periphery_client,
update::{add_update, make_update, update_update},
},
permission::get_check_permissions,
resource,
};
@@ -70,10 +67,10 @@ impl Resolve<WriteArgs> for CreateNetwork {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Update> {
let server = get_check_permissions::<Server>(
let server = resource::get_check_permissions::<Server>(
&self.server,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -86,7 +83,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
match periphery
.request(api::network::CreateNetwork {
name: to_docker_compatible_name(&self.name),
name: self.name,
driver: None,
})
.await
@@ -104,81 +101,3 @@ impl Resolve<WriteArgs> for CreateNetwork {
Ok(update)
}
}
impl Resolve<WriteArgs> for CreateTerminal {
  /// Create (or recreate) a named terminal on the target server's
  /// periphery agent. Requires Write-level terminal permission.
  #[instrument(name = "CreateTerminal", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Write.terminal(),
    )
    .await?;
    let request = api::terminal::CreateTerminal {
      name: self.name,
      command: self.command,
      recreate: self.recreate,
    };
    periphery_client(&server)?
      .request(request)
      .await
      .context("Failed to create terminal on periphery")?;
    Ok(NoData {})
  }
}
impl Resolve<WriteArgs> for DeleteTerminal {
  /// Delete a single named terminal on the target server's periphery
  /// agent. Requires Write-level terminal permission.
  #[instrument(name = "DeleteTerminal", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Write.terminal(),
    )
    .await?;
    let request = api::terminal::DeleteTerminal {
      terminal: self.terminal,
    };
    periphery_client(&server)?
      .request(request)
      .await
      .context("Failed to delete terminal on periphery")?;
    Ok(NoData {})
  }
}
impl Resolve<WriteArgs> for DeleteAllTerminals {
  /// Remove every terminal on the target server's periphery agent.
  /// Requires Write-level terminal permission.
  #[instrument(name = "DeleteAllTerminals", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<NoData> {
    let server = get_check_permissions::<Server>(
      &self.server,
      user,
      PermissionLevel::Write.terminal(),
    )
    .await?;
    periphery_client(&server)?
      .request(api::terminal::DeleteAllTerminals {})
      .await
      .context("Failed to delete all terminals on periphery")?;
    Ok(NoData {})
  }
}

View File

@@ -0,0 +1,92 @@
use komodo_client::{
api::write::{
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
RenameServerTemplate, UpdateServerTemplate,
},
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
update::Update,
},
};
use resolver_api::Resolve;
use crate::resource;
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateServerTemplate {
  /// Create a new ServerTemplate from the given name and config.
  #[instrument(name = "CreateServerTemplate", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<ServerTemplate> {
    let created = resource::create::<ServerTemplate>(
      &self.name,
      self.config,
      user,
    )
    .await?;
    Ok(created)
  }
}
impl Resolve<WriteArgs> for CopyServerTemplate {
  /// Copy an existing ServerTemplate's config into a new resource
  /// named `self.name`. Requires Write permission on the source.
  #[instrument(name = "CopyServerTemplate", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<ServerTemplate> {
    let source = resource::get_check_permissions::<ServerTemplate>(
      &self.id,
      user,
      PermissionLevel::Write,
    )
    .await?;
    let copied = resource::create::<ServerTemplate>(
      &self.name,
      source.config.into(),
      user,
    )
    .await?;
    Ok(copied)
  }
}
impl Resolve<WriteArgs> for DeleteServerTemplate {
  #[instrument(name = "DeleteServerTemplate", skip(args))]
  async fn resolve(
    self,
    args: &WriteArgs,
  ) -> serror::Result<ServerTemplate> {
    // Delegates to the generic resource delete helper and returns
    // the deleted template.
    let deleted =
      resource::delete::<ServerTemplate>(&self.id, args).await?;
    Ok(deleted)
  }
}
impl Resolve<WriteArgs> for UpdateServerTemplate {
  #[instrument(name = "UpdateServerTemplate", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<ServerTemplate> {
    // Delegates to the generic resource update helper.
    let updated =
      resource::update::<ServerTemplate>(&self.id, self.config, user)
        .await?;
    Ok(updated)
  }
}
impl Resolve<WriteArgs> for RenameServerTemplate {
  #[instrument(name = "RenameServerTemplate", skip(user))]
  async fn resolve(
    self,
    WriteArgs { user }: &WriteArgs,
  ) -> serror::Result<Update> {
    // Delegates to the generic resource rename helper, which
    // produces the Update record returned to the caller.
    let update =
      resource::rename::<ServerTemplate>(&self.id, &self.name, user)
        .await?;
    Ok(update)
  }
}

View File

@@ -30,7 +30,6 @@ use crate::{
query::get_server_with_state,
update::{add_update, make_update},
},
permission::get_check_permissions,
resource,
stack::{
get_stack_and_server,
@@ -61,12 +60,13 @@ impl Resolve<WriteArgs> for CopyStack {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Stack> {
let Stack { config, .. } = get_check_permissions::<Stack>(
&self.id,
user,
PermissionLevel::Read.into(),
)
.await?;
let Stack { config, .. } =
resource::get_check_permissions::<Stack>(
&self.id,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Stack>(&self.name, config.into(), user)
.await?,
@@ -115,7 +115,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
let (mut stack, server) = get_stack_and_server(
&stack,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
true,
)
.await?;
@@ -211,7 +211,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
}
update.finalize();
update.id = add_update(update.clone()).await?;
add_update(update.clone()).await?;
Ok(update)
}
@@ -229,10 +229,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
) -> serror::Result<NoData> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// stack should be able to do this.
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
@@ -432,10 +432,10 @@ impl Resolve<WriteArgs> for CreateStackWebhook {
);
};
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -552,10 +552,10 @@ impl Resolve<WriteArgs> for DeleteStackWebhook {
);
};
let stack = get_check_permissions::<Stack>(
let stack = resource::get_check_permissions::<Stack>(
&self.stack,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;

View File

@@ -19,12 +19,13 @@ use komodo_client::{
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
SyncDeployUpdate,
},
to_path_compatible_name,
to_komodo_name,
update::{Log, Update},
user::sync_user,
},
@@ -48,7 +49,6 @@ use crate::{
query::get_id_to_tags,
update::{add_update, make_update, update_update},
},
permission::get_check_permissions,
resource,
state::{db_client, github_client},
sync::{
@@ -79,10 +79,10 @@ impl Resolve<WriteArgs> for CopyResourceSync {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
let ResourceSync { config, .. } =
get_check_permissions::<ResourceSync>(
resource::get_check_permissions::<ResourceSync>(
&self.id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
Ok(
@@ -132,38 +132,6 @@ impl Resolve<WriteArgs> for RenameResourceSync {
}
}
impl Resolve<WriteArgs> for WriteSyncFileContents {
#[instrument(name = "WriteSyncFileContents", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let sync = get_check_permissions::<ResourceSync>(
&self.sync,
&args.user,
PermissionLevel::Write.into(),
)
.await?;
if !sync.config.files_on_host && sync.config.repo.is_empty() {
return Err(
anyhow!(
"This method is only for 'files on host' or 'repo' based syncs."
)
.into(),
);
}
let mut update =
make_update(&sync, Operation::WriteSyncContents, &args.user);
update.push_simple_log("File contents", &self.contents);
if sync.config.files_on_host {
write_sync_file_contents_on_host(self, args, sync, update).await
} else {
write_sync_file_contents_git(self, args, sync, update).await
}
}
}
async fn write_sync_file_contents_on_host(
req: WriteSyncFileContents,
args: &WriteArgs,
@@ -179,7 +147,7 @@ async fn write_sync_file_contents_on_host(
let root = core_config()
.sync_directory
.join(to_path_compatible_name(&sync.name));
.join(to_komodo_name(&sync.name));
let file_path =
file_path.parse::<PathBuf>().context("Invalid file path")?;
let resource_path = resource_path
@@ -330,7 +298,7 @@ async fn write_sync_file_contents_git(
.await
{
update.push_error_log(
"Refresh sync pending",
"Refresh failed",
format_serror(&e.error.into()),
);
}
@@ -341,16 +309,45 @@ async fn write_sync_file_contents_git(
Ok(update)
}
impl Resolve<WriteArgs> for WriteSyncFileContents {
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let sync = resource::get_check_permissions::<ResourceSync>(
&self.sync,
&args.user,
PermissionLevel::Write,
)
.await?;
if !sync.config.files_on_host && sync.config.repo.is_empty() {
return Err(
anyhow!(
"This method is only for 'files on host' or 'repo' based syncs."
)
.into(),
);
}
let mut update =
make_update(&sync, Operation::WriteSyncContents, &args.user);
update.push_simple_log("File contents", &self.contents);
if sync.config.files_on_host {
write_sync_file_contents_on_host(self, args, sync, update).await
} else {
write_sync_file_contents_git(self, args, sync, update).await
}
}
}
impl Resolve<WriteArgs> for CommitSync {
#[instrument(name = "CommitSync", skip(args))]
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
let WriteArgs { user } = args;
let sync = get_check_permissions::<entities::sync::ResourceSync>(
&self.sync,
user,
PermissionLevel::Write.into(),
)
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&self.sync, user, PermissionLevel::Write)
.await?;
let file_contents_empty = sync.config.file_contents_empty();
@@ -414,7 +411,7 @@ impl Resolve<WriteArgs> for CommitSync {
};
let file_path = core_config()
.sync_directory
.join(to_path_compatible_name(&sync.name))
.join(to_komodo_name(&sync.name))
.join(&resource_path);
if let Some(parent) = file_path.parent() {
fs::create_dir_all(parent)
@@ -517,13 +514,10 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
) -> serror::Result<ResourceSync> {
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
// sync should be able to do this.
let mut sync =
get_check_permissions::<entities::sync::ResourceSync>(
&self.sync,
user,
PermissionLevel::Execute.into(),
)
.await?;
let mut sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&self.sync, user, PermissionLevel::Execute)
.await?;
if !sync.config.managed
&& !sync.config.files_on_host
@@ -691,6 +685,17 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
&mut diffs,
)
.await?;
push_updates_for_view::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
@@ -823,9 +828,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
.context("failed to open existing pending resource sync updates alert")
.inspect_err(|e| warn!("{e:#}"))
.ok();
if sync.config.pending_alert {
send_alerts(&[alert]).await;
}
send_alerts(&[alert]).await;
}
// CLOSE ALERT
(Some(existing), false) => {
@@ -870,10 +873,10 @@ impl Resolve<WriteArgs> for CreateSyncWebhook {
);
};
let sync = get_check_permissions::<ResourceSync>(
let sync = resource::get_check_permissions::<ResourceSync>(
&self.sync,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -990,10 +993,10 @@ impl Resolve<WriteArgs> for DeleteSyncWebhook {
);
};
let sync = get_check_permissions::<ResourceSync>(
let sync = resource::get_check_permissions::<ResourceSync>(
&self.sync,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;

View File

@@ -17,6 +17,7 @@ use komodo_client::{
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
tag::{Tag, TagColor},
@@ -30,7 +31,6 @@ use resolver_api::Resolve;
use crate::{
helpers::query::{get_tag, get_tag_check_owner},
permission::get_check_permissions,
resource,
state::db_client,
};
@@ -131,6 +131,7 @@ impl Resolve<WriteArgs> for DeleteTag {
resource::remove_tag_from_all::<Builder>(&self.id),
resource::remove_tag_from_all::<Alerter>(&self.id),
resource::remove_tag_from_all::<Procedure>(&self.id),
resource::remove_tag_from_all::<ServerTemplate>(&self.id),
)?;
delete_one_by_id(&db_client().tags, &self.id, None).await?;
@@ -151,94 +152,104 @@ impl Resolve<WriteArgs> for UpdateTagsOnResource {
return Err(anyhow!("Invalid target type: System").into());
}
ResourceTarget::Build(id) => {
get_check_permissions::<Build>(
resource::get_check_permissions::<Build>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Build>(&id, self.tags, args).await?;
}
ResourceTarget::Builder(id) => {
get_check_permissions::<Builder>(
resource::get_check_permissions::<Builder>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Builder>(&id, self.tags, args).await?
}
ResourceTarget::Deployment(id) => {
get_check_permissions::<Deployment>(
resource::get_check_permissions::<Deployment>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Deployment>(&id, self.tags, args)
.await?
}
ResourceTarget::Server(id) => {
get_check_permissions::<Server>(
resource::get_check_permissions::<Server>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Server>(&id, self.tags, args).await?
}
ResourceTarget::Repo(id) => {
get_check_permissions::<Repo>(
resource::get_check_permissions::<Repo>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Repo>(&id, self.tags, args).await?
}
ResourceTarget::Alerter(id) => {
get_check_permissions::<Alerter>(
resource::get_check_permissions::<Alerter>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Alerter>(&id, self.tags, args).await?
}
ResourceTarget::Procedure(id) => {
get_check_permissions::<Procedure>(
resource::get_check_permissions::<Procedure>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Procedure>(&id, self.tags, args)
.await?
}
ResourceTarget::Action(id) => {
get_check_permissions::<Action>(
resource::get_check_permissions::<Action>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Action>(&id, self.tags, args).await?
}
ResourceTarget::ResourceSync(id) => {
get_check_permissions::<ResourceSync>(
ResourceTarget::ServerTemplate(id) => {
resource::get_check_permissions::<ServerTemplate>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<ServerTemplate>(&id, self.tags, args)
.await?
}
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
&id,
user,
PermissionLevel::Write,
)
.await?;
resource::update_tags::<ResourceSync>(&id, self.tags, args)
.await?
}
ResourceTarget::Stack(id) => {
get_check_permissions::<Stack>(
resource::get_check_permissions::<Stack>(
&id,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Stack>(&id, self.tags, args).await?

View File

@@ -2,7 +2,10 @@ use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::*,
api::write::{
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup,
},
entities::{komodo_timestamp, user_group::UserGroup},
};
use mungos::{
@@ -17,7 +20,6 @@ use crate::state::db_client;
use super::WriteArgs;
impl Resolve<WriteArgs> for CreateUserGroup {
#[instrument(name = "CreateUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -26,12 +28,11 @@ impl Resolve<WriteArgs> for CreateUserGroup {
return Err(anyhow!("This call is admin-only").into());
}
let user_group = UserGroup {
name: self.name,
id: Default::default(),
everyone: Default::default(),
users: Default::default(),
all: Default::default(),
updated_at: komodo_timestamp(),
name: self.name,
};
let db = db_client();
let id = db
@@ -52,7 +53,6 @@ impl Resolve<WriteArgs> for CreateUserGroup {
}
impl Resolve<WriteArgs> for RenameUserGroup {
#[instrument(name = "RenameUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -78,7 +78,6 @@ impl Resolve<WriteArgs> for RenameUserGroup {
}
impl Resolve<WriteArgs> for DeleteUserGroup {
#[instrument(name = "DeleteUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -111,7 +110,6 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
}
impl Resolve<WriteArgs> for AddUserToUserGroup {
#[instrument(name = "AddUserToUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -155,7 +153,6 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
}
impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
#[instrument(name = "RemoveUserFromUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -199,7 +196,6 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
}
impl Resolve<WriteArgs> for SetUsersInUserGroup {
#[instrument(name = "SetUsersInUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
@@ -244,33 +240,3 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
Ok(res)
}
}
impl Resolve<WriteArgs> for SetEveryoneUserGroup {
#[instrument(name = "SetEveryoneUserGroup", skip(admin), fields(admin = admin.username))]
async fn resolve(
self,
WriteArgs { user: admin }: &WriteArgs,
) -> serror::Result<UserGroup> {
if !admin.admin {
return Err(anyhow!("This call is admin-only").into());
}
let db = db_client();
let filter = match ObjectId::from_str(&self.user_group) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": &self.user_group },
};
db.user_groups
.update_one(filter.clone(), doc! { "$set": { "everyone": self.everyone } })
.await
.context("failed to set everyone on user group")?;
let res = db
.user_groups
.find_one(filter)
.await
.context("failed to query db for UserGroups")?
.context("no user group with given id")?;
Ok(res)
}
}

View File

@@ -1,4 +1,4 @@
use std::time::Duration;
use std::{str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use aws_config::{BehaviorVersion, Region};
@@ -8,15 +8,15 @@ use aws_sdk_ec2::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,
InstanceStateName, InstanceStatus, InstanceType, ResourceType,
Tag, TagSpecification,
Tag, TagSpecification, VolumeType,
},
};
use base64::Engine;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
builder::AwsBuilderConfig,
komodo_timestamp,
server_template::aws::AwsServerTemplateConfig,
};
use crate::{alert::send_alerts, config::core_config};
@@ -71,12 +71,12 @@ async fn create_ec2_client(region: String) -> Client {
#[instrument]
pub async fn launch_ec2_instance(
name: &str,
config: &AwsBuilderConfig,
config: AwsServerTemplateConfig,
) -> anyhow::Result<Ec2Instance> {
let AwsBuilderConfig {
let AwsServerTemplateConfig {
region,
instance_type,
volume_gb,
volumes,
ami_id,
subnet_id,
security_group_ids,
@@ -86,22 +86,19 @@ pub async fn launch_ec2_instance(
user_data,
port: _,
use_https: _,
git_providers: _,
docker_registries: _,
secrets: _,
} = config;
let instance_type = handle_unknown_instance_type(
InstanceType::from(instance_type.as_str()),
)?;
let client = create_ec2_client(region.clone()).await;
let req = client
let mut req = client
.run_instances()
.image_id(ami_id)
.instance_type(instance_type)
.network_interfaces(
InstanceNetworkInterfaceSpecification::builder()
.subnet_id(subnet_id)
.associate_public_ip_address(*assign_public_ip)
.associate_public_ip_address(assign_public_ip)
.set_groups(security_group_ids.to_vec().into())
.device_index(0)
.build(),
@@ -113,17 +110,6 @@ pub async fn launch_ec2_instance(
.resource_type(ResourceType::Instance)
.build(),
)
.block_device_mappings(
BlockDeviceMapping::builder()
.set_device_name("/dev/sda1".to_string().into())
.set_ebs(
EbsBlockDevice::builder()
.volume_size(*volume_gb)
.build()
.into(),
)
.build(),
)
.min_count(1)
.max_count(1)
.user_data(
@@ -131,6 +117,26 @@ pub async fn launch_ec2_instance(
.encode(user_data),
);
for volume in volumes {
let ebs = EbsBlockDevice::builder()
.volume_size(volume.size_gb)
.volume_type(
VolumeType::from_str(volume.volume_type.as_ref())
.context("invalid volume type")?,
)
.set_iops((volume.iops != 0).then_some(volume.iops))
.set_throughput(
(volume.throughput != 0).then_some(volume.throughput),
)
.build();
req = req.block_device_mappings(
BlockDeviceMapping::builder()
.set_device_name(volume.device_name.into())
.set_ebs(ebs.into())
.build(),
)
}
let res = req
.send()
.await
@@ -150,7 +156,7 @@ pub async fn launch_ec2_instance(
let state_name =
get_ec2_instance_state_name(&client, &instance_id).await?;
if state_name == Some(InstanceStateName::Running) {
let ip = if *use_public_ip {
let ip = if use_public_ip {
get_ec2_instance_public_ip(&client, &instance_id).await?
} else {
instance

View File

@@ -0,0 +1,157 @@
use anyhow::{Context, anyhow};
use axum::http::{HeaderName, HeaderValue};
use reqwest::{RequestBuilder, StatusCode};
use serde::{Serialize, de::DeserializeOwned};
use super::{
common::{
HetznerActionResponse, HetznerDatacenterResponse,
HetznerServerResponse, HetznerVolumeResponse,
},
create_server::{CreateServerBody, CreateServerResponse},
create_volume::{CreateVolumeBody, CreateVolumeResponse},
};
/// Root of the Hetzner Cloud API; request paths are joined onto this.
const BASE_URL: &str = "https://api.hetzner.cloud/v1";
/// Thin wrapper over `reqwest::Client` pre-configured with the
/// Hetzner API bearer token.
pub struct HetznerClient(reqwest::Client);

impl HetznerClient {
  /// Build a client that attaches `Authorization: Bearer <token>` as
  /// a default header on every request.
  ///
  /// # Panics
  /// Panics if the token contains bytes invalid in an HTTP header
  /// value, or if the reqwest client cannot be constructed — both are
  /// startup misconfigurations better surfaced immediately than as
  /// broken requests later.
  pub fn new(token: &str) -> HetznerClient {
    let auth = HeaderValue::from_str(&format!("Bearer {token}"))
      .expect("Hetzner token is not a valid HTTP header value");
    let client = reqwest::ClientBuilder::new()
      .default_headers(
        [(HeaderName::from_static("authorization"), auth)]
          .into_iter()
          .collect(),
      )
      .build()
      .expect("failed to build Hetzner request client");
    HetznerClient(client)
  }

  /// GET /servers/{id}
  pub async fn get_server(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerServerResponse> {
    self.get(&format!("/servers/{id}")).await
  }

  /// POST /servers
  pub async fn create_server(
    &self,
    body: &CreateServerBody,
  ) -> anyhow::Result<CreateServerResponse> {
    self.post("/servers", body).await
  }

  /// DELETE /servers/{id}
  #[allow(unused)]
  pub async fn delete_server(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerActionResponse> {
    self.delete(&format!("/servers/{id}")).await
  }

  /// GET /volumes/{id}
  pub async fn get_volume(
    &self,
    id: i64,
  ) -> anyhow::Result<HetznerVolumeResponse> {
    self.get(&format!("/volumes/{id}")).await
  }

  /// POST /volumes
  pub async fn create_volume(
    &self,
    body: &CreateVolumeBody,
  ) -> anyhow::Result<CreateVolumeResponse> {
    self.post("/volumes", body).await
  }

  /// DELETE /volumes/{id}
  ///
  /// Implemented directly rather than via [`Self::delete`] because a
  /// successful volume delete returns `204 No Content` with an empty
  /// body, which the generic JSON-parsing helper cannot decode.
  #[allow(unused)]
  pub async fn delete_volume(&self, id: i64) -> anyhow::Result<()> {
    let res = self
      .0
      .delete(format!("{BASE_URL}/volumes/{id}"))
      .send()
      .await
      .context("failed at request to delete volume")?;
    let status = res.status();
    if status == StatusCode::NO_CONTENT {
      Ok(())
    } else {
      let text = res
        .text()
        .await
        .context("failed to get response body as text")?;
      Err(anyhow!("{status} | {text}"))
    }
  }

  /// GET /datacenters
  #[allow(unused)]
  pub async fn list_datacenters(
    &self,
  ) -> anyhow::Result<HetznerDatacenterResponse> {
    self.get("/datacenters").await
  }

  // Generic GET with JSON-decoded response.
  async fn get<Res: DeserializeOwned>(
    &self,
    path: &str,
  ) -> anyhow::Result<Res> {
    let req = self.0.get(format!("{BASE_URL}{path}"));
    handle_req(req).await.with_context(|| {
      format!("failed at GET request to Hetzner | path: {path}")
    })
  }

  // Generic POST with JSON body and JSON-decoded response.
  async fn post<Body: Serialize, Res: DeserializeOwned>(
    &self,
    path: &str,
    body: &Body,
  ) -> anyhow::Result<Res> {
    let req = self.0.post(format!("{BASE_URL}{path}")).json(&body);
    handle_req(req).await.with_context(|| {
      format!("failed at POST request to Hetzner | path: {path}")
    })
  }

  // Generic DELETE with JSON-decoded response (see delete_volume for
  // the 204-No-Content exception).
  #[allow(unused)]
  async fn delete<Res: DeserializeOwned>(
    &self,
    path: &str,
  ) -> anyhow::Result<Res> {
    let req = self.0.delete(format!("{BASE_URL}{path}"));
    handle_req(req).await.with_context(|| {
      format!("failed at DELETE request to Hetzner | path: {path}")
    })
  }
}
/// Send the request and decode a JSON success body, or turn a
/// non-success response into an error carrying the status plus the
/// body (structured JSON when the body parses, raw text otherwise).
async fn handle_req<Res: DeserializeOwned>(
  req: RequestBuilder,
) -> anyhow::Result<Res> {
  let res = req.send().await?;
  let status = res.status();
  if status.is_success() {
    return res
      .json()
      .await
      .context("failed to parse response to json");
  }
  let text = res
    .text()
    .await
    .context("failed to get response body as text")?;
  // Prefer surfacing a structured JSON error body when present.
  match serde_json::from_str::<serde_json::Value>(&text) {
    Ok(json_error) => Err(anyhow!("{status} | {json_error:?}")),
    Err(_) => Err(anyhow!("{status} | {text}")),
  }
}

View File

@@ -0,0 +1,280 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
/// Wrapper for API responses that return a single server object.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerResponse {
  pub server: HetznerServer,
}
/// A server as returned by the Hetzner Cloud API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServer {
  pub id: i64,
  pub name: String,
  // Size of the primary disk — presumably GB; confirm against API docs.
  pub primary_disk_size: f64,
  pub image: Option<HetznerImage>,
  pub private_net: Vec<HetznerPrivateNet>,
  pub public_net: HetznerPublicNet,
  pub server_type: HetznerServerTypeDetails,
  pub status: HetznerServerStatus,
  // IDs of attached volumes; defaults to empty when absent in the payload.
  #[serde(default)]
  pub volumes: Vec<i64>,
}
/// Hardware description of a server type as reported by the API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerServerTypeDetails {
  pub architecture: String,
  pub cores: i64,
  pub cpu_type: String,
  pub description: String,
  // disk / memory sizes — presumably GB; confirm against API docs.
  pub disk: f64,
  pub id: i64,
  pub memory: f64,
  pub name: String,
}
/// A server's attachment to one private network.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPrivateNet {
  pub alias_ips: Vec<String>,
  pub ip: String,
  pub mac_address: String,
  // ID of the network this attachment belongs to.
  pub network: i64,
}
/// Public-network configuration of a server.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerPublicNet {
  // Defaults to empty when absent in the payload.
  #[serde(default)]
  pub firewalls: Vec<HetznerFirewall>,
  pub floating_ips: Vec<i64>,
  // None presumably means no public IPv4 is attached — confirm.
  pub ipv4: Option<HetznerIpv4>,
}
/// A firewall applied to a server's public network interface.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerFirewall {
  pub id: i64,
  pub status: String,
}
/// Public IPv4 assignment of a server.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerIpv4 {
  pub id: Option<i64>,
  pub blocked: bool,
  // Reverse-DNS pointer for the address.
  pub dns_ptr: String,
  pub ip: String,
}
/// OS image a server was created from.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerImage {
  pub id: i64,
  pub description: String,
  pub name: Option<String>,
  pub os_flavor: String,
  pub os_version: Option<String>,
  pub rapid_deploy: Option<bool>,
}
/// Wrapper for API responses that return a single action object.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerActionResponse {
  pub action: HetznerAction,
}
/// An asynchronous operation tracked by the Hetzner API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerAction {
  pub command: String,
  // Populated when status is Error.
  pub error: Option<HetznerError>,
  // Completion timestamp; None while the action is still running.
  pub finished: Option<String>,
  pub id: i64,
  pub progress: i32,
  // Resources this action operates on.
  pub resources: Vec<HetznerResource>,
  pub started: String,
  pub status: HetznerActionStatus,
}
/// Error details attached to a failed action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerError {
  pub code: String,
  pub message: String,
}
/// A resource (server, volume, ...) referenced by an action.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerResource {
  pub id: i64,
  // `type` is a Rust keyword, hence the serde rename.
  #[serde(rename = "type")]
  pub ty: String,
}
/// Wrapper for API responses that return a single volume object.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolumeResponse {
  pub volume: HetznerVolume,
}
/// A block-storage Volume as returned by the Hetzner Cloud API.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerVolume {
  /// Name of the Resource. Must be unique per Project.
  pub name: String,
  /// Point in time when the Resource was created (in ISO-8601 format).
  pub created: String,
  /// Filesystem of the Volume if formatted on creation, null if not formatted on creation
  pub format: Option<HetznerVolumeFormat>,
  /// ID of the Volume.
  pub id: i64,
  /// User-defined labels ( key/value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Device path on the file system for the Volume
  pub linux_device: String,
  /// Protection configuration for the Resource.
  pub protection: HetznerProtection,
  /// ID of the Server the Volume is attached to, null if it is not attached at all
  pub server: Option<i64>,
  /// Size in GB of the Volume
  pub size: i64,
  /// Current status of the Volume. Allowed: `creating`, `available`
  pub status: HetznerVolumeStatus,
}
/// Protection flags on a Hetzner resource.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerProtection {
  /// Prevent the Resource from being deleted.
  pub delete: bool,
}
/// Wrapper for the datacenter-list API response.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterResponse {
  pub datacenters: Vec<HetznerDatacenterDetails>,
}
/// Datacenter info; `location` is kept as an untyped JSON map.
#[derive(Debug, Clone, Deserialize)]
pub struct HetznerDatacenterDetails {
  pub id: i64,
  pub name: String,
  pub location: serde_json::Map<String, serde_json::Value>,
}
/// Hetzner locations, serialized as the API's short slugs (e.g. "nbg1").
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum HetznerLocation {
  #[serde(rename = "nbg1")]
  Nuremberg1,
  #[serde(rename = "hel1")]
  Helsinki1,
  #[serde(rename = "fsn1")]
  Falkenstein1,
  #[serde(rename = "ash")]
  Ashburn,
  #[serde(rename = "hil")]
  Hillsboro,
  #[serde(rename = "sin")]
  Singapore,
}
/// Hetzner datacenters, serialized as the API's slugs (e.g. "nbg1-dc3").
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum HetznerDatacenter {
  #[serde(rename = "nbg1-dc3")]
  Nuremberg1Dc3,
  #[serde(rename = "hel1-dc2")]
  Helsinki1Dc2,
  #[serde(rename = "fsn1-dc14")]
  Falkenstein1Dc14,
  #[serde(rename = "ash-dc1")]
  AshburnDc1,
  #[serde(rename = "hil-dc1")]
  HillsboroDc1,
  #[serde(rename = "sin-dc1")]
  SingaporeDc1,
}
impl From<HetznerDatacenter> for HetznerLocation {
fn from(value: HetznerDatacenter) -> Self {
match value {
HetznerDatacenter::Nuremberg1Dc3 => HetznerLocation::Nuremberg1,
HetznerDatacenter::Helsinki1Dc2 => HetznerLocation::Helsinki1,
HetznerDatacenter::Falkenstein1Dc14 => {
HetznerLocation::Falkenstein1
}
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
HetznerDatacenter::SingaporeDc1 => HetznerLocation::Singapore,
}
}
}
/// Filesystem a volume may be formatted with on creation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeFormat {
  Xfs,
  Ext4,
}
/// Lifecycle status of a volume.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerVolumeStatus {
  Creating,
  Available,
}
/// Lifecycle status of a server.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerServerStatus {
  Running,
  Initializing,
  Starting,
  Stopping,
  Off,
  Deleting,
  Migrating,
  Rebuilding,
  Unknown,
}
/// Terminal / in-flight status of an async action.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum HetznerActionStatus {
  Running,
  Success,
  Error,
}
/// Hetzner server types, named by their hardware profile.
///
/// Serialization is driven entirely by the per-variant
/// `#[serde(rename = "…")]` attributes mapping each variant to its
/// API slug (e.g. `cpx11`). The previous container-level
/// `rename_all = "UPPERCASE"` was fully shadowed by these per-variant
/// renames and has been removed as dead, misleading configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
  // Shared
  #[serde(rename = "cpx11")]
  SharedAmd2Core2Ram40Disk,
  #[serde(rename = "cax11")]
  SharedArm2Core4Ram40Disk,
  #[serde(rename = "cx22")]
  SharedIntel2Core4Ram40Disk,
  #[serde(rename = "cpx21")]
  SharedAmd3Core4Ram80Disk,
  #[serde(rename = "cax21")]
  SharedArm4Core8Ram80Disk,
  #[serde(rename = "cx32")]
  SharedIntel4Core8Ram80Disk,
  #[serde(rename = "cpx31")]
  SharedAmd4Core8Ram160Disk,
  #[serde(rename = "cax31")]
  SharedArm8Core16Ram160Disk,
  #[serde(rename = "cx42")]
  SharedIntel8Core16Ram160Disk,
  #[serde(rename = "cpx41")]
  SharedAmd8Core16Ram240Disk,
  #[serde(rename = "cax41")]
  SharedArm16Core32Ram320Disk,
  #[serde(rename = "cx52")]
  SharedIntel16Core32Ram320Disk,
  #[serde(rename = "cpx51")]
  SharedAmd16Core32Ram360Disk,
  // Dedicated
  #[serde(rename = "ccx13")]
  DedicatedAmd2Core8Ram80Disk,
  #[serde(rename = "ccx23")]
  DedicatedAmd4Core16Ram160Disk,
  #[serde(rename = "ccx33")]
  DedicatedAmd8Core32Ram240Disk,
  #[serde(rename = "ccx43")]
  DedicatedAmd16Core64Ram360Disk,
  #[serde(rename = "ccx53")]
  DedicatedAmd32Core128Ram600Disk,
  #[serde(rename = "ccx63")]
  DedicatedAmd48Core192Ram960Disk,
}

View File

@@ -0,0 +1,75 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerDatacenter, HetznerLocation, HetznerServer,
HetznerServerType,
};
/// Request body for creating a server (POST /servers).
#[derive(Debug, Clone, Serialize)]
pub struct CreateServerBody {
  /// Name of the Server to create (must be unique per Project and a valid hostname as per RFC 1123)
  pub name: String,
  /// Auto-mount Volumes after attach
  #[serde(skip_serializing_if = "Option::is_none")]
  pub automount: Option<bool>,
  /// ID or name of Datacenter to create Server in (must not be used together with location)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub datacenter: Option<HetznerDatacenter>,
  /// ID or name of Location to create Server in (must not be used together with datacenter)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub location: Option<HetznerLocation>,
  /// Firewalls which should be applied on the Server's public network interface at creation time
  pub firewalls: Vec<Firewall>,
  /// ID or name of the Image the Server is created from
  pub image: String,
  /// User-defined labels (key-value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Network IDs which should be attached to the Server private network interface at the creation time
  pub networks: Vec<i64>,
  /// ID of the Placement Group the server should be in
  #[serde(skip_serializing_if = "Option::is_none")]
  pub placement_group: Option<i64>,
  /// Public Network options
  pub public_net: PublicNet,
  /// ID or name of the Server type this Server should be created with
  pub server_type: HetznerServerType,
  /// SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time
  pub ssh_keys: Vec<String>,
  /// This automatically triggers a Power on a Server-Server Action after the creation is finished and is returned in the next_actions response object.
  pub start_after_create: bool,
  /// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub user_data: Option<String>,
  /// Volume IDs which should be attached to the Server at the creation time. Volumes must be in the same Location.
  pub volumes: Vec<i64>,
}
/// Reference to a firewall to apply at server creation.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct Firewall {
  /// ID of the Firewall
  pub firewall: i64,
}
/// Public-network options for server creation.
#[derive(Debug, Clone, Copy, Serialize)]
pub struct PublicNet {
  /// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
  pub enable_ipv4: bool,
  /// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
  pub enable_ipv6: bool,
  /// ID of the ipv4 Primary IP to use. If omitted and enable_ipv4 is true, a new ipv4 Primary IP will automatically be created.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub ipv4: Option<i64>,
  /// ID of the ipv6 Primary IP to use. If omitted and enable_ipv6 is true, a new ipv6 Primary IP will automatically be created.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub ipv6: Option<i64>,
}
/// Response body from creating a server.
#[derive(Debug, Clone, Deserialize)]
pub struct CreateServerResponse {
  pub action: HetznerAction,
  pub next_actions: Vec<HetznerAction>,
  // NOTE(review): presumably None when SSH keys were supplied at
  // creation — confirm against the Hetzner API docs.
  pub root_password: Option<String>,
  pub server: HetznerServer,
}

View File

@@ -0,0 +1,36 @@
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use super::common::{
HetznerAction, HetznerLocation, HetznerVolume, HetznerVolumeFormat,
};
/// Request body for creating a volume (POST /volumes).
#[derive(Debug, Clone, Serialize)]
pub struct CreateVolumeBody {
  /// Name of the volume
  pub name: String,
  /// Auto-mount Volume after attach. server must be provided.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub automount: Option<bool>,
  /// Format Volume after creation. One of: xfs, ext4
  #[serde(skip_serializing_if = "Option::is_none")]
  pub format: Option<HetznerVolumeFormat>,
  /// User-defined labels (key-value pairs) for the Resource
  pub labels: HashMap<String, String>,
  /// Location to create the Volume in (can be omitted if Server is specified)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub location: Option<HetznerLocation>,
  /// Server to which to attach the Volume once it's created (Volume will be created in the same Location as the server)
  #[serde(skip_serializing_if = "Option::is_none")]
  pub server: Option<i64>,
  /// Size of the Volume in GB
  pub size: i64,
}
/// Response body of the Hetzner create-volume endpoint.
#[derive(Debug, Clone, Deserialize)]
pub struct CreateVolumeResponse {
  /// The create action itself
  pub action: HetznerAction,
  /// Follow-up actions triggered by the creation
  pub next_actions: Vec<HetznerAction>,
  /// The created Volume
  pub volume: HetznerVolume,
}

View File

@@ -0,0 +1,281 @@
use std::{
sync::{Arc, Mutex, OnceLock},
time::Duration,
};
use anyhow::{Context, anyhow};
use futures::future::join_all;
use komodo_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
HetznerVolumeFormat,
};
use crate::{
cloud::hetzner::{
common::HetznerServerStatus, create_server::CreateServerBody,
create_volume::CreateVolumeBody,
},
config::core_config,
};
use self::{client::HetznerClient, common::HetznerVolumeStatus};
mod client;
mod common;
mod create_server;
mod create_volume;
/// Lazily-initialized global Hetzner API client.
///
/// Initialized from the core config token on first call.
/// Returns `None` when no Hetzner token is configured.
fn hetzner() -> Option<&'static HetznerClient> {
  static CLIENT: OnceLock<Option<HetznerClient>> = OnceLock::new();
  let client = CLIENT.get_or_init(|| {
    let token = &core_config().hetzner.token;
    if token.is_empty() {
      None
    } else {
      Some(HetznerClient::new(token))
    }
  });
  client.as_ref()
}
/// The subset of a launched Hetzner server the caller needs to
/// register it with Komodo.
pub struct HetznerServerMinimal {
  /// Hetzner-assigned server id
  pub id: i64,
  /// The server ip (public or private depending on template config)
  pub ip: String,
}
/// Seconds to wait between successive status polls against the Hetzner API.
const POLL_RATE_SECS: u64 = 3;
/// Maximum status poll attempts before giving up (~5 minutes at 3s per try).
const MAX_POLL_TRIES: usize = 100;
/// Creates the volumes and server defined by `config` on Hetzner Cloud,
/// waits for the server to report `Running`, and returns its id / ip.
///
/// Steps:
/// 1. Create each configured volume, then poll until all are `Available`.
/// 2. Create the server with the volumes attached, started on create.
/// 3. Poll the server until it reports `Running`, then return the
///    public ipv4 or first private-network ip per `use_public_ip`.
///
/// # Errors
/// Fails if the Hetzner token is not configured, any API call fails,
/// the volumes never reach `Available`, the server never reaches
/// `Running` within the polling budget, or the requested ip kind is
/// not attached to the server.
#[instrument]
pub async fn launch_hetzner_server(
  name: &str,
  config: HetznerServerTemplateConfig,
) -> anyhow::Result<HetznerServerMinimal> {
  let hetzner =
    *hetzner().as_ref().context("Hetzner token not configured")?;
  let HetznerServerTemplateConfig {
    image,
    datacenter,
    private_network_ids,
    placement_group,
    enable_public_ipv4,
    enable_public_ipv6,
    firewall_ids,
    server_type,
    ssh_keys,
    user_data,
    use_public_ip,
    labels,
    volumes,
    // port / use_https only matter for the periphery address,
    // not for instance creation.
    port: _,
    use_https: _,
  } = config;
  let datacenter = hetzner_datacenter(datacenter);
  // Create volumes and get their ids
  let mut volume_ids = Vec::new();
  for volume in volumes {
    let body = CreateVolumeBody {
      name: volume.name,
      format: Some(hetzner_format(volume.format)),
      location: Some(datacenter.into()),
      labels: volume.labels,
      size: volume.size_gb,
      automount: None,
      server: None,
    };
    let id = hetzner
      .create_volume(&body)
      .await
      .context("failed to create hetzner volume")?
      .volume
      .id;
    volume_ids.push(id);
  }
  // Make sure all volumes are Available before continuing.
  // `vol_ids_poll` holds the ids still waiting; ids are removed as
  // they come up, so an empty list means every volume is ready.
  let vol_ids_poll = Arc::new(Mutex::new(volume_ids.clone()));
  for _ in 0..MAX_POLL_TRIES {
    if vol_ids_poll.lock().unwrap().is_empty() {
      break;
    }
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    let ids = vol_ids_poll.lock().unwrap().clone();
    let futures = ids.into_iter().map(|id| {
      let vol_ids = vol_ids_poll.clone();
      async move {
        // Transient errors just leave the id in place for the
        // next poll round.
        let Ok(res) = hetzner.get_volume(id).await else {
          return;
        };
        if matches!(res.volume.status, HetznerVolumeStatus::Available)
        {
          vol_ids.lock().unwrap().retain(|_id| *_id != id);
        }
      }
    });
    join_all(futures).await;
  }
  if !vol_ids_poll.lock().unwrap().is_empty() {
    return Err(anyhow!("Volumes not ready after poll"));
  }
  let body = CreateServerBody {
    name: name.to_string(),
    automount: None,
    datacenter: Some(datacenter),
    location: None,
    firewalls: firewall_ids
      .into_iter()
      .map(|firewall| create_server::Firewall { firewall })
      .collect(),
    image,
    labels,
    networks: private_network_ids,
    // 0 means "no placement group configured".
    placement_group: (placement_group > 0).then_some(placement_group),
    public_net: create_server::PublicNet {
      enable_ipv4: enable_public_ipv4,
      enable_ipv6: enable_public_ipv6,
      ipv4: None,
      ipv6: None,
    },
    server_type: hetzner_server_type(server_type),
    ssh_keys,
    start_after_create: true,
    user_data: (!user_data.is_empty()).then_some(user_data),
    volumes: volume_ids,
  };
  let server_id = hetzner
    .create_server(&body)
    .await
    // Fix: error message previously misspelled "hetnzer".
    .context("failed to create hetzner server")?
    .server
    .id;
  // Poll until the server reports Running, then resolve its ip.
  for _ in 0..MAX_POLL_TRIES {
    tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
    let Ok(res) = hetzner.get_server(server_id).await else {
      continue;
    };
    if matches!(res.server.status, HetznerServerStatus::Running) {
      let ip = if use_public_ip {
        res
          .server
          .public_net
          .ipv4
          .context("instance does not have public ipv4 attached")?
          .ip
      } else {
        res
          .server
          .private_net
          .first()
          .context("no private networks attached")?
          .ip
          .to_string()
      };
      let server = HetznerServerMinimal { id: server_id, ip };
      return Ok(server);
    }
  }
  Err(anyhow!(
    "failed to verify server running after polling status"
  ))
}
fn hetzner_format(
format: HetznerVolumeFormat,
) -> common::HetznerVolumeFormat {
match format {
HetznerVolumeFormat::Xfs => common::HetznerVolumeFormat::Xfs,
HetznerVolumeFormat::Ext4 => common::HetznerVolumeFormat::Ext4,
}
}
fn hetzner_datacenter(
datacenter: HetznerDatacenter,
) -> common::HetznerDatacenter {
match datacenter {
HetznerDatacenter::Nuremberg1Dc3 => {
common::HetznerDatacenter::Nuremberg1Dc3
}
HetznerDatacenter::Helsinki1Dc2 => {
common::HetznerDatacenter::Helsinki1Dc2
}
HetznerDatacenter::Falkenstein1Dc14 => {
common::HetznerDatacenter::Falkenstein1Dc14
}
HetznerDatacenter::AshburnDc1 => {
common::HetznerDatacenter::AshburnDc1
}
HetznerDatacenter::HillsboroDc1 => {
common::HetznerDatacenter::HillsboroDc1
}
HetznerDatacenter::SingaporeDc1 => {
common::HetznerDatacenter::SingaporeDc1
}
}
}
fn hetzner_server_type(
server_type: HetznerServerType,
) -> common::HetznerServerType {
match server_type {
HetznerServerType::SharedAmd2Core2Ram40Disk => {
common::HetznerServerType::SharedAmd2Core2Ram40Disk
}
HetznerServerType::SharedArm2Core4Ram40Disk => {
common::HetznerServerType::SharedArm2Core4Ram40Disk
}
HetznerServerType::SharedIntel2Core4Ram40Disk => {
common::HetznerServerType::SharedIntel2Core4Ram40Disk
}
HetznerServerType::SharedAmd3Core4Ram80Disk => {
common::HetznerServerType::SharedAmd3Core4Ram80Disk
}
HetznerServerType::SharedArm4Core8Ram80Disk => {
common::HetznerServerType::SharedArm4Core8Ram80Disk
}
HetznerServerType::SharedIntel4Core8Ram80Disk => {
common::HetznerServerType::SharedIntel4Core8Ram80Disk
}
HetznerServerType::SharedAmd4Core8Ram160Disk => {
common::HetznerServerType::SharedAmd4Core8Ram160Disk
}
HetznerServerType::SharedArm8Core16Ram160Disk => {
common::HetznerServerType::SharedArm8Core16Ram160Disk
}
HetznerServerType::SharedIntel8Core16Ram160Disk => {
common::HetznerServerType::SharedIntel8Core16Ram160Disk
}
HetznerServerType::SharedAmd8Core16Ram240Disk => {
common::HetznerServerType::SharedAmd8Core16Ram240Disk
}
HetznerServerType::SharedArm16Core32Ram320Disk => {
common::HetznerServerType::SharedArm16Core32Ram320Disk
}
HetznerServerType::SharedIntel16Core32Ram320Disk => {
common::HetznerServerType::SharedIntel16Core32Ram320Disk
}
HetznerServerType::SharedAmd16Core32Ram360Disk => {
common::HetznerServerType::SharedAmd16Core32Ram360Disk
}
HetznerServerType::DedicatedAmd2Core8Ram80Disk => {
common::HetznerServerType::DedicatedAmd2Core8Ram80Disk
}
HetznerServerType::DedicatedAmd4Core16Ram160Disk => {
common::HetznerServerType::DedicatedAmd4Core16Ram160Disk
}
HetznerServerType::DedicatedAmd8Core32Ram240Disk => {
common::HetznerServerType::DedicatedAmd8Core32Ram240Disk
}
HetznerServerType::DedicatedAmd16Core64Ram360Disk => {
common::HetznerServerType::DedicatedAmd16Core64Ram360Disk
}
HetznerServerType::DedicatedAmd32Core128Ram600Disk => {
common::HetznerServerType::DedicatedAmd32Core128Ram600Disk
}
HetznerServerType::DedicatedAmd48Core192Ram960Disk => {
common::HetznerServerType::DedicatedAmd48Core192Ram960Disk
}
}
}

View File

@@ -1,9 +1,10 @@
pub mod aws;
#[allow(unused)]
pub mod hetzner;
#[derive(Debug)]
pub enum BuildCleanupData {
/// Nothing to clean up
Server,
/// Clean up AWS instance
Server { repo_name: String },
Aws { instance_id: String, region: String },
}

View File

@@ -8,7 +8,7 @@ use komodo_client::entities::{
config::core::{
AwsCredentials, CoreConfig, DatabaseConfig, Env,
GithubWebhookAppConfig, GithubWebhookAppInstallationConfig,
OauthCredentials,
HetznerCredentials, OauthCredentials,
},
logger::LogConfig,
};
@@ -120,6 +120,11 @@ pub fn core_config() -> &'static CoreConfig {
.komodo_aws_secret_access_key)
.unwrap_or(config.aws.secret_access_key),
},
hetzner: HetznerCredentials {
token: maybe_read_item_from_file(env.komodo_hetzner_token_file, env
.komodo_hetzner_token)
.unwrap_or(config.hetzner.token),
},
github_webhook_app: GithubWebhookAppConfig {
app_id: maybe_read_item_from_file(env.komodo_github_webhook_app_app_id_file, env
.komodo_github_webhook_app_app_id)
@@ -134,7 +139,6 @@ pub fn core_config() -> &'static CoreConfig {
title: env.komodo_title.unwrap_or(config.title),
host: env.komodo_host.unwrap_or(config.host),
port: env.komodo_port.unwrap_or(config.port),
bind_ip: env.komodo_bind_ip.unwrap_or(config.bind_ip),
first_server: env.komodo_first_server.unwrap_or(config.first_server),
frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path),
jwt_ttl: env
@@ -172,8 +176,6 @@ pub fn core_config() -> &'static CoreConfig {
.unwrap_or(config.ui_write_disabled),
disable_confirm_dialog: env.komodo_disable_confirm_dialog
.unwrap_or(config.disable_confirm_dialog),
disable_websocket_reconnect: env.komodo_disable_websocket_reconnect
.unwrap_or(config.disable_websocket_reconnect),
enable_new_users: env.komodo_enable_new_users
.unwrap_or(config.enable_new_users),
disable_user_registration: env.komodo_disable_user_registration
@@ -191,7 +193,6 @@ pub fn core_config() -> &'static CoreConfig {
stdio: env
.komodo_logging_stdio
.unwrap_or(config.logging.stdio),
pretty: env.komodo_logging_pretty.unwrap_or(config.logging.pretty),
otlp_endpoint: env
.komodo_logging_otlp_endpoint
.unwrap_or(config.logging.otlp_endpoint),
@@ -199,7 +200,6 @@ pub fn core_config() -> &'static CoreConfig {
.komodo_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name),
},
pretty_startup_config: env.komodo_pretty_startup_config.unwrap_or(config.pretty_startup_config),
ssl_enabled: env.komodo_ssl_enabled.unwrap_or(config.ssl_enabled),
ssl_key_file: env.komodo_ssl_key_file.unwrap_or(config.ssl_key_file),
ssl_cert_file: env.komodo_ssl_cert_file.unwrap_or(config.ssl_cert_file),

View File

@@ -12,6 +12,7 @@ use komodo_client::entities::{
provider::{DockerRegistryAccount, GitProviderAccount},
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
stats::SystemStatsRecord,
sync::ResourceSync,
@@ -49,6 +50,7 @@ pub struct DbClient {
pub procedures: Collection<Procedure>,
pub actions: Collection<Action>,
pub alerters: Collection<Alerter>,
pub server_templates: Collection<ServerTemplate>,
pub resource_syncs: Collection<ResourceSync>,
pub stacks: Collection<Stack>,
//
@@ -118,6 +120,8 @@ impl DbClient {
alerters: resource_collection(&db, "Alerter").await?,
procedures: resource_collection(&db, "Procedure").await?,
actions: resource_collection(&db, "Action").await?,
server_templates: resource_collection(&db, "ServerTemplate")
.await?,
resource_syncs: resource_collection(&db, "ResourceSync")
.await?,
stacks: resource_collection(&db, "Stack").await?,

View File

@@ -7,6 +7,7 @@ use komodo_client::entities::{
builder::{AwsBuilderConfig, Builder, BuilderConfig},
komodo_timestamp,
server::Server,
server_template::aws::AwsServerTemplateConfig,
update::{Log, Update},
};
use periphery_client::{
@@ -60,7 +61,12 @@ pub async fn get_builder_periphery(
.health_check()
.await
.context("Url Builder failed health check")?;
Ok((periphery, BuildCleanupData::Server))
Ok((
periphery,
BuildCleanupData::Server {
repo_name: resource_name,
},
))
}
BuilderConfig::Server(config) => {
if config.server_id.is_empty() {
@@ -68,7 +74,12 @@ pub async fn get_builder_periphery(
}
let server = resource::get::<Server>(&config.server_id).await?;
let periphery = periphery_client(&server)?;
Ok((periphery, BuildCleanupData::Server))
Ok((
periphery,
BuildCleanupData::Server {
repo_name: resource_name,
},
))
}
BuilderConfig::Aws(config) => {
get_aws_builder(&resource_name, version, config, update).await
@@ -87,8 +98,11 @@ async fn get_aws_builder(
let version = version.map(|v| format!("-v{v}")).unwrap_or_default();
let instance_name = format!("BUILDER-{resource_name}{version}");
let Ec2Instance { instance_id, ip } =
launch_ec2_instance(&instance_name, &config).await?;
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
&instance_name,
AwsServerTemplateConfig::from_builder_config(&config),
)
.await?;
info!("ec2 instance launched");
@@ -165,14 +179,17 @@ async fn get_aws_builder(
)
}
#[instrument(skip(update))]
#[instrument(skip(periphery, update))]
pub async fn cleanup_builder_instance(
periphery: PeripheryClient,
cleanup_data: BuildCleanupData,
update: &mut Update,
) {
match cleanup_data {
BuildCleanupData::Server => {
// Nothing to clean up
BuildCleanupData::Server { repo_name } => {
let _ = periphery
.request(api::git::DeleteRepo { name: repo_name })
.await;
}
BuildCleanupData::Aws {
instance_id,

View File

@@ -1,32 +0,0 @@
use anyhow::Context;
/// A compiled pattern matcher: either a wildcard glob, or — when the
/// pattern is wrapped in backslashes (`\...\`) — a regular expression.
pub enum Matcher<'a> {
  /// Compiled wildcard glob pattern
  Wildcard(wildcard::Wildcard<'a>),
  /// Compiled regular expression
  Regex(regex::Regex),
}
impl<'a> Matcher<'a> {
  /// Compiles `pattern` into a [`Matcher`].
  ///
  /// Patterns wrapped in backslashes (eg `\^server-[0-9]+$\`) are
  /// compiled as regular expressions; anything else is compiled as a
  /// wildcard glob.
  ///
  /// # Errors
  /// Returns an error if the inner regex or wildcard fails to compile.
  pub fn new(pattern: &'a str) -> anyhow::Result<Self> {
    // The `len() >= 2` guard fixes a panic: a lone `\` both starts
    // and ends with a backslash, making the slice below `[1..0]`.
    // Such a pattern now falls through to the wildcard branch and
    // errors gracefully if invalid.
    if pattern.len() >= 2
      && pattern.starts_with('\\')
      && pattern.ends_with('\\')
    {
      let inner = &pattern[1..(pattern.len() - 1)];
      let regex = regex::Regex::new(inner)
        .with_context(|| format!("invalid regex. got: {inner}"))?;
      Ok(Self::Regex(regex))
    } else {
      let wildcard = wildcard::Wildcard::new(pattern.as_bytes())
        .with_context(|| {
          format!("invalid wildcard. got: {pattern}")
        })?;
      Ok(Self::Wildcard(wildcard))
    }
  }
  /// Returns true if `source` matches the compiled pattern.
  pub fn is_match(&self, source: &str) -> bool {
    match self {
      Matcher::Wildcard(wildcard) => {
        wildcard.is_match(source.as_bytes())
      }
      Matcher::Regex(regex) => regex.is_match(source),
    }
  }
}

View File

@@ -1,28 +1,39 @@
use std::time::Duration;
use std::{str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use indexmap::IndexSet;
use komodo_client::entities::{
ResourceTarget,
permission::{
Permission, PermissionLevel, SpecificPermission, UserTarget,
use futures::future::join_all;
use komodo_client::{
api::write::{CreateBuilder, CreateServer},
entities::{
ResourceTarget,
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
komodo_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::Log,
user::{User, system_user},
},
server::Server,
user::User,
};
use mongo_indexed::Document;
use mungos::mongodb::bson::{Bson, doc};
use mungos::{
find::find_collect,
mongodb::bson::{Bson, doc, oid::ObjectId, to_document},
};
use periphery_client::PeripheryClient;
use rand::Rng;
use resolver_api::Resolve;
use crate::{config::core_config, state::db_client};
use crate::{
api::write::WriteArgs, config::core_config, resource,
state::db_client,
};
pub mod action_state;
pub mod builder;
pub mod cache;
pub mod channel;
pub mod interpolate;
pub mod matcher;
pub mod procedure;
pub mod prune;
pub mod query;
@@ -67,9 +78,6 @@ pub async fn git_token(
account_username: &str,
mut on_https_found: impl FnMut(bool),
) -> anyhow::Result<Option<String>> {
if provider_domain.is_empty() || account_username.is_empty() {
return Ok(None);
}
let db_provider = db_client()
.git_accounts
.find_one(doc! { "domain": provider_domain, "username": account_username })
@@ -135,11 +143,7 @@ pub fn periphery_client(
let client = PeripheryClient::new(
&server.config.address,
if server.config.passkey.is_empty() {
&core_config().passkey
} else {
&server.config.passkey
},
&core_config().passkey,
Duration::from_secs(server.config.timeout_seconds as u64),
);
@@ -151,7 +155,6 @@ pub async fn create_permission<T>(
user: &User,
target: T,
level: PermissionLevel,
specific: IndexSet<SpecificPermission>,
) where
T: Into<ResourceTarget> + std::fmt::Debug,
{
@@ -167,7 +170,6 @@ pub async fn create_permission<T>(
user_target: UserTarget::User(user.id.clone()),
resource_target: target.clone(),
level,
specific,
})
.await
{
@@ -194,3 +196,160 @@ pub fn flatten_document(doc: Document) -> Document {
target
}
/// Runs all startup cleanup tasks concurrently.
/// Called once, after the db connection is established.
pub async fn startup_cleanup() {
  tokio::join!(
    startup_in_progress_update_cleanup(),
    startup_open_alert_cleanup(),
  );
}
/// Run on startup, as no updates should be in progress on startup.
/// Marks any update still `InProgress` as `Complete` / failed, and
/// appends an explanatory log entry, since a shutdown mid-execution
/// means the update can never finish.
async fn startup_in_progress_update_cleanup() {
  let log = Log::error(
    "Komodo shutdown",
    String::from(
      "Komodo shutdown during execution. If this is a build, the builder may not have been terminated.",
    ),
  );
  // This static log won't fail to serialize, unwrap ok.
  let log = to_document(&log).unwrap();
  if let Err(e) = db_client()
    .updates
    .update_many(
      doc! { "status": "InProgress" },
      doc! {
        "$set": {
          "status": "Complete",
          "success": false,
        },
        "$push": {
          "logs": log
        }
      },
    )
    .await
  {
    // Best-effort: log and continue startup on db failure.
    error!("failed to cleanup in progress updates on startup | {e:#}")
  }
}
/// Run on startup, ensure open alerts pointing to invalid resources are closed.
async fn startup_open_alert_cleanup() {
let db = db_client();
let Ok(alerts) =
find_collect(&db.alerts, doc! { "resolved": false }, None)
.await
.inspect_err(|e| {
error!(
"failed to list all alerts for startup open alert cleanup | {e:?}"
)
})
else {
return;
};
let futures = alerts.into_iter().map(|alert| async move {
match alert.target {
ResourceTarget::Server(id) => {
resource::get::<Server>(&id)
.await
.is_err()
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
}
ResourceTarget::ResourceSync(id) => {
resource::get::<ResourceSync>(&id)
.await
.is_err()
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
}
// No other resources should have open alerts.
_ => ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok(),
}
});
let to_update_ids = join_all(futures)
.await
.into_iter()
.flatten()
.collect::<Vec<_>>();
if let Err(e) = db
.alerts
.update_many(
doc! { "_id": { "$in": to_update_ids } },
doc! { "$set": {
"resolved": true,
"resolved_ts": komodo_timestamp()
} },
)
.await
{
error!(
"failed to clean up invalid open alerts on startup | {e:#}"
)
}
}
/// Ensures a default server / builder exists with the defined address.
///
/// No-op unless `first_server` is set in the core config. If the db
/// contains no servers, one is created pointing at `first_server`;
/// if it contains no builders, a "local" server builder is created
/// targeting that server. Failures are logged, never fatal.
pub async fn ensure_first_server_and_builder() {
  let first_server = &core_config().first_server;
  if first_server.is_empty() {
    return;
  }
  let db = db_client();
  // Any existing server (empty query) disables server creation.
  let Ok(server) = db
    .servers
    .find_one(Document::new())
    .await
    .inspect_err(|e| error!("Failed to initialize 'first_server'. Failed to query db. {e:?}"))
  else {
    return;
  };
  let server = if let Some(server) = server {
    server
  } else {
    // Random suffix avoids name collisions on repeated startups.
    match (CreateServer {
      name: format!("server-{}", random_string(5)),
      config: PartialServerConfig {
        address: Some(first_server.to_string()),
        enabled: Some(true),
        ..Default::default()
      },
    })
    .resolve(&WriteArgs {
      user: system_user().to_owned(),
    })
    .await
    {
      Ok(server) => server,
      Err(e) => {
        error!(
          "Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
          e.error
        );
        return;
      }
    }
  };
  // Only create the builder when the builders collection is empty
  // (Ok(None)); bail on any existing builder or query error.
  let Ok(None) = db.builders
    .find_one(Document::new()).await
    .inspect_err(|e| error!("Failed to initialize 'first_builder' | Failed to query db | {e:?}")) else {
    return;
  };
  if let Err(e) = (CreateBuilder {
    name: String::from("local"),
    config: PartialBuilderConfig::Server(
      PartialServerBuilderConfig {
        server_id: Some(server.id),
      },
    ),
  })
  .resolve(&WriteArgs {
    user: system_user().to_owned(),
  })
  .await
  {
    error!(
      "Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}",
      e.error
    );
  }
}

View File

@@ -9,7 +9,6 @@ use komodo_client::{
action::Action,
build::Build,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
stack::Stack,
@@ -167,13 +166,6 @@ async fn execute_stage(
)
.await?;
}
Execution::BatchPullStack(exec) => {
extend_batch_exection::<BatchPullStack>(
&exec.pattern,
&mut executions,
)
.await?;
}
Execution::BatchDestroyStack(exec) => {
extend_batch_exection::<BatchDestroyStack>(
&exec.pattern,
@@ -993,12 +985,6 @@ async fn execute_execution(
)
.await?
}
Execution::BatchPullStack(_) => {
// All batch executions must be expanded in `execute_stage`
return Err(anyhow!(
"Batch method BatchPullStack not implemented correctly"
));
}
Execution::StartStack(req) => {
let req = ExecuteRequest::StartStack(req);
let update = init_execution_update(&req, &user).await?;
@@ -1190,7 +1176,6 @@ async fn extend_batch_exection<E: ExtendBatch>(
pattern,
Default::default(),
procedure_user(),
PermissionLevel::Read.into(),
&[],
)
.await?
@@ -1290,16 +1275,6 @@ impl ExtendBatch for BatchDeployStackIfChanged {
}
}
impl ExtendBatch for BatchPullStack {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {
Execution::PullStack(PullStack {
stack,
services: Vec::new(),
})
}
}
impl ExtendBatch for BatchDestroyStack {
type Resource = Stack;
fn single_execution(stack: String) -> Execution {

View File

@@ -1,11 +1,6 @@
use std::{
collections::HashMap,
str::FromStr,
sync::{Arc, OnceLock},
};
use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use async_timing_util::{ONE_MIN_MS, unix_timestamp_ms};
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
action::Action,
@@ -14,12 +9,12 @@ use komodo_client::entities::{
builder::Builder,
deployment::{Deployment, DeploymentState},
docker::container::{ContainerListItem, ContainerStateStatusEnum},
permission::{PermissionLevel, PermissionLevelAndSpecifics},
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::{Server, ServerState},
server_template::ServerTemplate,
stack::{Stack, StackServiceNames, StackState},
stats::SystemInformation,
sync::ResourceSync,
tag::Tag,
update::Update,
@@ -34,19 +29,14 @@ use mungos::{
options::FindOneOptions,
},
};
use periphery_client::api::stats;
use tokio::sync::Mutex;
use crate::{
config::core_config,
permission::get_user_permission_on_resource,
resource,
resource::{self, get_user_permission_on_resource},
stack::compose_container_match_regex,
state::{db_client, deployment_status_cache, stack_status_cache},
};
use super::periphery_client;
// user: Id or username
#[instrument(level = "debug")]
pub async fn get_user(user: &str) -> anyhow::Result<User> {
@@ -239,10 +229,7 @@ pub async fn get_user_user_groups(
find_collect(
&db_client().user_groups,
doc! {
"$or": [
{ "everyone": true },
{ "users": user_id },
]
"users": user_id
},
None,
)
@@ -281,9 +268,9 @@ pub fn user_target_query(
pub async fn get_user_permission_on_target(
user: &User,
target: &ResourceTarget,
) -> anyhow::Result<PermissionLevelAndSpecifics> {
) -> anyhow::Result<PermissionLevel> {
match target {
ResourceTarget::System(_) => Ok(PermissionLevel::None.into()),
ResourceTarget::System(_) => Ok(PermissionLevel::None),
ResourceTarget::Build(id) => {
get_user_permission_on_resource::<Build>(user, id).await
}
@@ -308,6 +295,10 @@ pub async fn get_user_permission_on_target(
ResourceTarget::Action(id) => {
get_user_permission_on_resource::<Action>(user, id).await
}
ResourceTarget::ServerTemplate(id) => {
get_user_permission_on_resource::<ServerTemplate>(user, id)
.await
}
ResourceTarget::ResourceSync(id) => {
get_user_permission_on_resource::<ResourceSync>(user, id).await
}
@@ -391,36 +382,3 @@ pub async fn get_variables_and_secrets()
Ok(VariablesAndSecrets { variables, secrets })
}
// This protects the peripheries from spam requests
const SYSTEM_INFO_EXPIRY: u128 = ONE_MIN_MS;
type SystemInfoCache =
Mutex<HashMap<String, Arc<(SystemInformation, u128)>>>;
fn system_info_cache() -> &'static SystemInfoCache {
static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
OnceLock::new();
SYSTEM_INFO_CACHE.get_or_init(Default::default)
}
pub async fn get_system_info(
server: &Server,
) -> anyhow::Result<SystemInformation> {
let mut lock = system_info_cache().lock().await;
let res = match lock.get(&server.id) {
Some(cached) if cached.1 > unix_timestamp_ms() => {
cached.0.clone()
}
_ => {
let stats = periphery_client(server)?
.request(stats::GetSystemInformation {})
.await?;
lock.insert(
server.id.clone(),
(stats.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
.into(),
);
stats
}
};
Ok(res)
}

View File

@@ -9,6 +9,7 @@ use komodo_client::entities::{
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
update::{Update, UpdateListItem},
@@ -384,6 +385,16 @@ pub async fn init_execution_update(
return Ok(Default::default());
}
// Server template
ExecuteRequest::LaunchServer(data) => (
Operation::LaunchServer,
ResourceTarget::ServerTemplate(
resource::get::<ServerTemplate>(&data.server_template)
.await?
.id,
),
),
// Resource Sync
ExecuteRequest::RunSync(data) => (
Operation::RunSync,
@@ -435,9 +446,6 @@ pub async fn init_execution_update(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::BatchPullStack(_data) => {
return Ok(Default::default());
}
ExecuteRequest::RestartStack(data) => (
if !data.services.is_empty() {
Operation::RestartStackService

View File

@@ -22,11 +22,8 @@ mod db;
mod helpers;
mod listener;
mod monitor;
mod permission;
mod resource;
mod schedule;
mod stack;
mod startup;
mod state;
mod sync;
mod ts_client;
@@ -36,40 +33,32 @@ async fn app() -> anyhow::Result<()> {
dotenvy::dotenv().ok();
let config = core_config();
logger::init(&config.logging)?;
if let Err(e) =
rustls::crypto::aws_lc_rs::default_provider().install_default()
{
error!("Failed to install default crypto provider | {e:?}");
std::process::exit(1);
};
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
info!("{:?}", config.sanitized());
if core_config().pretty_startup_config {
info!("{:#?}", config.sanitized());
} else {
info!("{:?}", config.sanitized());
}
// Init jwt client to crash on failure
state::jwt_client();
tokio::join!(
// Init db_client check to crash on db init failure
state::init_db_client(),
// Manage OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::spawn_oidc_client_management()
);
// Run after db connection.
startup::on_startup().await;
tokio::join!(
// Maybe initialize first server
helpers::ensure_first_server_and_builder(),
// Cleanup open updates / invalid alerts
helpers::startup_cleanup(),
);
// init jwt client to crash on failure
state::jwt_client();
// Spawn background tasks
// Spawn tasks
monitor::spawn_monitor_loop();
resource::spawn_resource_refresh_loop();
resource::spawn_build_state_refresh_loop();
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();
resource::spawn_action_state_refresh_loop();
schedule::spawn_schedule_executor();
helpers::prune::spawn_prune_loop();
// Setup static frontend services
@@ -85,7 +74,6 @@ async fn app() -> anyhow::Result<()> {
.nest("/read", api::read::router())
.nest("/write", api::write::router())
.nest("/execute", api::execute::router())
.nest("/terminal", api::terminal::router())
.nest("/listener", listener::router())
.nest("/ws", ws::router())
.nest("/client", ts_client::router())
@@ -98,10 +86,9 @@ async fn app() -> anyhow::Result<()> {
)
.into_make_service();
let addr =
format!("{}:{}", core_config().bind_ip, core_config().port);
let socket_addr = SocketAddr::from_str(&addr)
.context("failed to parse listen address")?;
let socket_addr =
SocketAddr::from_str(&format!("0.0.0.0:{}", core_config().port))
.context("failed to parse socket addr")?;
if config.ssl_enabled {
info!("🔒 Core SSL Enabled");

View File

@@ -2,8 +2,7 @@ use std::collections::HashMap;
use anyhow::Context;
use komodo_client::entities::{
permission::PermissionLevel, resource::ResourceQuery,
server::Server, user::User,
resource::ResourceQuery, server::Server, user::User,
};
use crate::resource;
@@ -40,7 +39,6 @@ async fn get_all_servers_map()
admin: true,
..Default::default()
},
PermissionLevel::Read.into(),
&[],
)
.await

View File

@@ -1,229 +0,0 @@
use std::collections::HashSet;
use anyhow::{Context, anyhow};
use futures::{FutureExt, future::BoxFuture};
use indexmap::IndexSet;
use komodo_client::{
api::read::GetPermission,
entities::{
permission::{PermissionLevel, PermissionLevelAndSpecifics},
resource::Resource,
user::User,
},
};
use mongo_indexed::doc;
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
api::read::ReadArgs,
config::core_config,
helpers::query::{get_user_user_groups, user_target_query},
resource::{KomodoResource, get},
state::db_client,
};
/// Fetches the resource by id or name, and verifies `user` holds at
/// least `required_permissions` on it.
///
/// Admins bypass all checks. Otherwise the user's effective permission
/// (direct + user groups + base permission + transparent mode) must
/// fulfill both the required level and the required specific
/// permissions.
///
/// # Errors
/// Errors if the resource does not exist or the user's permissions
/// are insufficient.
pub async fn get_check_permissions<T: KomodoResource>(
  id_or_name: &str,
  user: &User,
  required_permissions: PermissionLevelAndSpecifics,
) -> anyhow::Result<Resource<T::Config, T::Info>> {
  let resource = get::<T>(id_or_name).await?;
  // Allow all if admin
  if user.admin {
    return Ok(resource);
  }
  let user_permissions =
    get_user_permission_on_resource::<T>(user, &resource.id).await?;
  if (
    // Allow if its just read or below, and transparent mode enabled
    (required_permissions.level <= PermissionLevel::Read && core_config().transparent_mode)
    // Allow if resource has base permission level greater than or equal to required permission level
    || resource.base_permission.level >= required_permissions.level
  ) && user_permissions
    .fulfills_specific(&required_permissions.specific)
  {
    return Ok(resource);
  }
  if user_permissions.fulfills(&required_permissions) {
    Ok(resource)
  } else {
    Err(anyhow!(
      "User does not have required permissions on this {}. Must have at least {} permissions{}",
      T::resource_type(),
      required_permissions.level,
      if required_permissions.specific.is_empty() {
        String::new()
      } else {
        format!(
          ", as well as these specific permissions: [{}]",
          required_permissions.specifics_for_log()
        )
      }
    ))
  }
}
/// Computes the effective permission (level + specific permissions)
/// `user` has on the given resource.
///
/// Permissions are accumulated, taking the max level and union of
/// specifics across, in order: transparent-mode floor, inherited
/// specifics from a related resource (if any), the resource's base
/// permission, the user's per-variant "all" permission, each user
/// group's per-variant "all" permission, and finally any explicit
/// permission documents targeting this resource.
///
/// Admin users short-circuit with full (Write + all specifics)
/// permissions. Returns a boxed future because the inherited-specifics
/// lookup can recurse back into permission resolution.
#[instrument(level = "debug")]
pub fn get_user_permission_on_resource<'a, T: KomodoResource>(
  user: &'a User,
  resource_id: &'a str,
) -> BoxFuture<'a, anyhow::Result<PermissionLevelAndSpecifics>> {
  Box::pin(async {
    // Admin returns early with max permissions
    if user.admin {
      return Ok(PermissionLevel::Write.all());
    }
    let resource_type = T::resource_type();
    let resource = get::<T>(resource_id).await?;
    // Some resources inherit specific permissions from a related
    // target (recursive resolve via GetPermission).
    let initial_specific = if let Some(additional_target) =
      T::inherit_specific_permissions_from(&resource)
    {
      GetPermission {
        target: additional_target,
      }
      .resolve(&ReadArgs { user: user.clone() })
      .await
      .map_err(|e| e.error)
      .context("failed to get user permission on additional target")?
      .specific
    } else {
      IndexSet::new()
    };
    let mut permission = PermissionLevelAndSpecifics {
      level: if core_config().transparent_mode {
        PermissionLevel::Read
      } else {
        PermissionLevel::None
      },
      specific: initial_specific,
    };
    // Add in the resource level global base permissions
    if resource.base_permission.level > permission.level {
      permission.level = resource.base_permission.level;
    }
    permission
      .specific
      .extend(resource.base_permission.specific);
    // Overlay users base on resource variant
    if let Some(user_permission) =
      user.all.get(&resource_type).cloned()
    {
      if user_permission.level > permission.level {
        permission.level = user_permission.level;
      }
      permission.specific.extend(user_permission.specific);
    }
    // Overlay any user groups base on resource variant
    let groups = get_user_user_groups(&user.id).await?;
    for group in &groups {
      if let Some(group_permission) =
        group.all.get(&resource_type).cloned()
      {
        if group_permission.level > permission.level {
          permission.level = group_permission.level;
        }
        permission.specific.extend(group_permission.specific);
      }
    }
    // Overlay any specific permissions
    let permission = find_collect(
      &db_client().permissions,
      doc! {
        "$or": user_target_query(&user.id, &groups)?,
        "resource_target.type": resource_type.as_ref(),
        "resource_target.id": resource_id
      },
      None,
    )
    .await
    .context("failed to query db for permissions")?
    .into_iter()
    // get the max resource permission user has between personal / any user groups
    .fold(permission, |mut permission, resource_permission| {
      if resource_permission.level > permission.level {
        permission.level = resource_permission.level
      }
      permission.specific.extend(resource_permission.specific);
      permission
    });
    Ok(permission)
  })
}
/// Collects the ids of every resource of type `T` the user is allowed to see.
///
/// Returns `Ok(None)` if there is still no need to filter by resource id at
/// all (eg admin, transparent mode, or a non-None 'all' permission on the
/// variant, either personal or through user group membership).
///
/// Otherwise returns `Ok(Some(ids))`, the deduplicated union of:
/// - resources whose `base_permission` is Read / Execute / Write, and
/// - resources granted through the permissions table to the user or
///   one of their user groups.
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_user<T: KomodoResource>(
  user: &User,
) -> anyhow::Result<Option<Vec<String>>> {
  // Check admin or transparent mode: everything visible, no filter needed.
  if user.admin || core_config().transparent_mode {
    return Ok(None);
  }
  let resource_type = T::resource_type();
  // Check user 'all' on variant.
  // Only `level` is read here, so borrow instead of cloning the whole
  // permission (cloning would needlessly copy its `specific` IndexSet).
  if user
    .all
    .get(&resource_type)
    .is_some_and(|permission| permission.level > PermissionLevel::None)
  {
    return Ok(None);
  }
  // Check user groups 'all' on variant
  let groups = get_user_user_groups(&user.id).await?;
  for group in &groups {
    if group
      .all
      .get(&resource_type)
      .is_some_and(|permission| {
        permission.level > PermissionLevel::None
      })
    {
      return Ok(None);
    }
  }
  // Run both db queries concurrently.
  let (base, perms) = tokio::try_join!(
    // Get any resources with non-none base permission.
    // Matches both the legacy plain-enum shape ("base_permission": "Read")
    // and the current struct shape ("base_permission.level": "Read").
    find_collect(
      T::coll(),
      doc! { "$or": [
        { "base_permission": { "$in": ["Read", "Execute", "Write"] } },
        { "base_permission.level": { "$in": ["Read", "Execute", "Write"] } }
      ] },
      None,
    )
    .map(|res| res.with_context(|| format!(
      "failed to query {resource_type} on db"
    ))),
    // And any ids using the permissions table, granted either to the
    // user directly or to one of their user groups.
    find_collect(
      &db_client().permissions,
      doc! {
        "$or": user_target_query(&user.id, &groups)?,
        "resource_target.type": resource_type.as_ref(),
        "level": { "$in": ["Read", "Execute", "Write"] }
      },
      None,
    )
    .map(|res| res.context("failed to query permissions on db"))
  )?;
  // Add specific ids
  let ids = perms
    .into_iter()
    .map(|p| p.resource_target.extract_variant_id().1.to_string())
    // Chain in the ones with non-None base permissions
    .chain(base.into_iter().map(|res| res.id))
    // collect into hashset first to remove any duplicates
    .collect::<HashSet<_>>();
  Ok(Some(ids.into_iter().collect()))
}

View File

@@ -2,7 +2,7 @@ use std::time::Duration;
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
Operation, ResourceTargetVariant,
action::{
Action, ActionConfig, ActionConfigDiff, ActionInfo,
ActionListItem, ActionListItemInfo, ActionQuerySpecifics,
@@ -17,12 +17,7 @@ use mungos::{
mongodb::{Collection, bson::doc, options::FindOneOptions},
};
use crate::{
schedule::{
cancel_schedule, get_schedule_item_info, update_schedule,
},
state::{action_state_cache, action_states, db_client},
};
use crate::state::{action_state_cache, action_states, db_client};
impl super::KomodoResource for Action {
type Config = ActionConfig;
@@ -36,10 +31,6 @@ impl super::KomodoResource for Action {
ResourceTargetVariant::Action
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Action(id.into())
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().actions
@@ -49,9 +40,6 @@ impl super::KomodoResource for Action {
action: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let state = get_action_state(&action.id).await;
let (next_scheduled_run, schedule_error) = get_schedule_item_info(
&ResourceTarget::Action(action.id.clone()),
);
ActionListItem {
name: action.name,
id: action.id,
@@ -60,8 +48,6 @@ impl super::KomodoResource for Action {
info: ActionListItemInfo {
state,
last_run_at: action.info.last_run_at,
next_scheduled_run,
schedule_error,
},
}
}
@@ -97,10 +83,9 @@ impl super::KomodoResource for Action {
}
async fn post_create(
created: &Resource<Self::Config, Self::Info>,
_created: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
update_schedule(created);
refresh_action_state_cache().await;
Ok(())
}
@@ -146,10 +131,9 @@ impl super::KomodoResource for Action {
}
async fn post_delete(
resource: &Resource<Self::Config, Self::Info>,
_resource: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
cancel_schedule(&ResourceTarget::Action(resource.id.clone()));
Ok(())
}
}

View File

@@ -1,6 +1,6 @@
use derive_variants::ExtractVariant;
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
Operation, ResourceTargetVariant,
alerter::{
Alerter, AlerterConfig, AlerterConfigDiff, AlerterListItem,
AlerterListItemInfo, AlerterQuerySpecifics, PartialAlerterConfig,
@@ -25,10 +25,6 @@ impl super::KomodoResource for Alerter {
ResourceTargetVariant::Alerter
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Alerter(id.into())
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().alerters

View File

@@ -1,33 +1,26 @@
use std::time::Duration;
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::write::RefreshBuildCache,
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo, BuildListItem,
BuildListItemInfo, BuildQuerySpecifics, BuildState,
PartialBuildConfig,
},
builder::Builder,
environment_vars_from_str, optional_string,
permission::PermissionLevel,
resource::Resource,
to_docker_compatible_name,
update::Update,
user::{User, build_user},
use komodo_client::entities::{
Operation, ResourceTargetVariant,
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo, BuildListItem,
BuildListItemInfo, BuildQuerySpecifics, BuildState,
PartialBuildConfig,
},
builder::Builder,
environment_vars_from_str,
permission::PermissionLevel,
resource::Resource,
update::Update,
user::User,
};
use mungos::{
find::find_collect,
mongodb::{Collection, bson::doc, options::FindOptions},
};
use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
config::core_config,
helpers::{empty_or_only_spaces, query::get_latest_update},
state::{action_states, build_state_cache, db_client},
@@ -45,14 +38,6 @@ impl super::KomodoResource for Build {
ResourceTargetVariant::Build
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Build(id.into())
}
fn validated_name(name: &str) -> String {
to_docker_compatible_name(name)
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().builds
@@ -71,13 +56,10 @@ impl super::KomodoResource for Build {
last_built_at: build.info.last_built_at,
version: build.config.version,
builder_id: build.config.builder_id,
files_on_host: build.config.files_on_host,
git_provider: optional_string(build.config.git_provider),
repo: optional_string(build.config.repo),
branch: optional_string(build.config.branch),
image_registry_domain: optional_string(
build.config.image_registry.domain,
),
git_provider: build.config.git_provider,
image_registry_domain: build.config.image_registry.domain,
repo: build.config.repo,
branch: build.config.branch,
built_hash: build.info.built_hash,
latest_hash: build.info.latest_hash,
state,
@@ -114,23 +96,10 @@ impl super::KomodoResource for Build {
}
async fn post_create(
created: &Resource<Self::Config, Self::Info>,
update: &mut Update,
_created: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
refresh_build_state_cache().await;
if let Err(e) = (RefreshBuildCache {
build: created.name.clone(),
})
.resolve(&WriteArgs {
user: build_user().to_owned(),
})
.await
{
update.push_error_log(
"Refresh build cache",
format_serror(&e.error.context("The build cache has failed to refresh. This may be due to a misconfiguration of the Build").into())
);
};
Ok(())
}
@@ -219,7 +188,7 @@ async fn validate_config(
let builder = super::get_check_permissions::<Builder>(
builder_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Read,
)
.await
.context("Cannot attach Build to this Builder")?;

View File

@@ -1,13 +1,12 @@
use anyhow::Context;
use indexmap::IndexSet;
use komodo_client::entities::{
MergePartial, Operation, ResourceTarget, ResourceTargetVariant,
MergePartial, Operation, ResourceTargetVariant,
builder::{
Builder, BuilderConfig, BuilderConfigDiff, BuilderConfigVariant,
BuilderListItem, BuilderListItemInfo, BuilderQuerySpecifics,
PartialBuilderConfig, PartialServerBuilderConfig,
},
permission::{PermissionLevel, SpecificPermission},
permission::PermissionLevel,
resource::Resource,
server::Server,
update::Update,
@@ -32,14 +31,6 @@ impl super::KomodoResource for Builder {
ResourceTargetVariant::Builder
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Builder(id.into())
}
fn creator_specific_permissions() -> IndexSet<SpecificPermission> {
[SpecificPermission::Attach].into_iter().collect()
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().builders
@@ -185,7 +176,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Write,
)
.await?;
*server_id = server.id;

View File

@@ -1,8 +1,7 @@
use anyhow::Context;
use formatting::format_serror;
use indexmap::IndexSet;
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
Operation, ResourceTargetVariant,
build::Build,
deployment::{
Deployment, DeploymentConfig, DeploymentConfigDiff,
@@ -11,10 +10,9 @@ use komodo_client::entities::{
PartialDeploymentConfig, conversions_from_str,
},
environment_vars_from_str,
permission::{PermissionLevel, SpecificPermission},
permission::PermissionLevel,
resource::Resource,
server::Server,
to_docker_compatible_name,
update::Update,
user::User,
};
@@ -45,30 +43,6 @@ impl super::KomodoResource for Deployment {
ResourceTargetVariant::Deployment
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Deployment(id.into())
}
fn validated_name(name: &str) -> String {
to_docker_compatible_name(name)
}
fn creator_specific_permissions() -> IndexSet<SpecificPermission> {
[
SpecificPermission::Inspect,
SpecificPermission::Logs,
SpecificPermission::Terminal,
]
.into_iter()
.collect()
}
fn inherit_specific_permissions_from(
_self: &Resource<Self::Config, Self::Info>,
) -> Option<ResourceTarget> {
ResourceTarget::Server(_self.config.server_id.clone()).into()
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().deployments
@@ -306,7 +280,7 @@ async fn validate_config(
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Write,
)
.await
.context("Cannot attach Deployment to this Server")?;
@@ -320,7 +294,7 @@ async fn validate_config(
let build = get_check_permissions::<Build>(
build_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Read,
)
.await
.context(

View File

@@ -5,20 +5,16 @@ use std::{
use anyhow::{Context, anyhow};
use formatting::format_serror;
use futures::future::join_all;
use indexmap::IndexSet;
use futures::{FutureExt, future::join_all};
use komodo_client::{
api::{read::ExportResourcesToToml, write::CreateTag},
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
komodo_timestamp,
permission::{
PermissionLevel, PermissionLevelAndSpecifics,
SpecificPermission,
},
permission::PermissionLevel,
resource::{AddFilters, Resource, ResourceQuery},
tag::Tag,
to_general_name,
to_komodo_name,
update::Update,
user::{User, system_user},
},
@@ -33,18 +29,21 @@ use mungos::{
options::FindOptions,
},
};
use partial_derive2::{Diff, MaybeNone, PartialDiff};
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
use resolver_api::Resolve;
use serde::{Serialize, de::DeserializeOwned};
use crate::{
api::{read::ReadArgs, write::WriteArgs},
config::core_config,
helpers::{
create_permission, flatten_document,
query::{get_tag, id_or_name_filter},
query::{
get_tag, get_user_user_groups, id_or_name_filter,
user_target_query,
},
update::{add_update, make_update},
},
permission::{get_check_permissions, get_resource_ids_for_user},
state::db_client,
};
@@ -57,6 +56,7 @@ mod procedure;
mod refresh;
mod repo;
mod server;
mod server_template;
mod stack;
mod sync;
@@ -107,7 +107,6 @@ pub trait KomodoResource {
type QuerySpecifics: AddFilters + Default + std::fmt::Debug;
fn resource_type() -> ResourceTargetVariant;
fn resource_target(id: impl Into<String>) -> ResourceTarget;
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>;
@@ -118,28 +117,6 @@ pub trait KomodoResource {
#[allow(clippy::ptr_arg)]
async fn busy(id: &String) -> anyhow::Result<bool>;
/// Some resource types have restrictions on the allowed formatting for names.
/// Stacks, Builds, and Deployments all require names to be "docker compatible",
/// which means all lowercase, and no spaces or dots.
fn validated_name(name: &str) -> String {
to_general_name(name)
}
/// These permissions go to the creator of the resource,
/// and include full access to the resource.
fn creator_specific_permissions() -> IndexSet<SpecificPermission> {
IndexSet::new()
}
/// For Stacks / Deployments, they should inherit specific
/// permissions like `Logs`, `Inspect`, and `Terminal`
/// from their attached Server.
fn inherit_specific_permissions_from(
_self: &Resource<Self::Config, Self::Info>,
) -> Option<ResourceTarget> {
None
}
// =======
// CREATE
// =======
@@ -236,6 +213,106 @@ pub async fn get<T: KomodoResource>(
})
}
/// Fetches the resource by id or name, and verifies the user holds at
/// least `permission_level` on it. Returns the resource on success,
/// or an error if the user's effective permission is insufficient.
pub async fn get_check_permissions<T: KomodoResource>(
  id_or_name: &str,
  user: &User,
  permission_level: PermissionLevel,
) -> anyhow::Result<Resource<T::Config, T::Info>> {
  let resource = get::<T>(id_or_name).await?;
  // Fast paths that skip the (db-backed) per-user permission lookup.
  if user.admin
    // Allow if its just read or below, and transparent mode enabled
    || (permission_level <= PermissionLevel::Read
      && core_config().transparent_mode)
    // Allow if resource has base permission level greater than or equal to required permission level
    || resource.base_permission >= permission_level
  {
    return Ok(resource);
  }
  // Fall back to the full overlay of user / group / specific permissions.
  let permissions =
    get_user_permission_on_resource::<T>(user, &resource.id).await?;
  if permissions >= permission_level {
    Ok(resource)
  } else {
    Err(anyhow!(
      "User does not have required permissions on this {}. Must have at least {permission_level} permissions",
      T::resource_type()
    ))
  }
}
/// Computes the user's effective permission level on a single resource,
/// as the maximum of (in overlay order): transparent-mode Read, the
/// resource's base permission, the user's 'all' level for the variant,
/// each user group's 'all' level, and any specific permission entries
/// in the permissions collection. Admins always get Write.
#[instrument(level = "debug")]
pub async fn get_user_permission_on_resource<T: KomodoResource>(
  user: &User,
  resource_id: &str,
) -> anyhow::Result<PermissionLevel> {
  if user.admin {
    return Ok(PermissionLevel::Write);
  }
  let resource_type = T::resource_type();
  // Start with base of Read or None
  let mut base = if core_config().transparent_mode {
    PermissionLevel::Read
  } else {
    PermissionLevel::None
  };
  // Add in the resource level global base permission
  let resource_base = get::<T>(resource_id).await?.base_permission;
  if resource_base > base {
    base = resource_base;
  }
  // Overlay users base on resource variant
  if let Some(level) = user.all.get(&resource_type).cloned() {
    if level > base {
      base = level;
    }
  }
  if base == PermissionLevel::Write {
    // No reason to keep going if already Write at this point.
    return Ok(PermissionLevel::Write);
  }
  // Overlay any user groups base on resource variant
  let groups = get_user_user_groups(&user.id).await?;
  for group in &groups {
    if let Some(level) = group.all.get(&resource_type).cloned() {
      if level > base {
        base = level;
      }
    }
  }
  if base == PermissionLevel::Write {
    // No reason to keep going if already Write at this point.
    return Ok(PermissionLevel::Write);
  }
  // Overlay any specific permissions.
  // Queries entries for the user and all their groups at once,
  // scoped to this exact resource target.
  let permission = find_collect(
    &db_client().permissions,
    doc! {
      "$or": user_target_query(&user.id, &groups)?,
      "resource_target.type": resource_type.as_ref(),
      "resource_target.id": resource_id
    },
    None,
  )
  .await
  .context("failed to query db for permissions")?
  .into_iter()
  // get the max permission user has between personal / any user groups
  .fold(base, |level, permission| {
    if permission.level > level {
      permission.level
    } else {
      level
    }
  });
  Ok(permission)
}
// ======
// LIST
// ======
@@ -255,17 +332,80 @@ pub async fn get_resource_object_ids_for_user<T: KomodoResource>(
})
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
///
/// Otherwise returns the deduplicated union of ids from resources with
/// a non-None base permission and from the permissions table entries
/// for the user / their user groups.
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_user<T: KomodoResource>(
  user: &User,
) -> anyhow::Result<Option<Vec<String>>> {
  // Check admin or transparent mode
  if user.admin || core_config().transparent_mode {
    return Ok(None);
  }
  let resource_type = T::resource_type();
  // Check user 'all' on variant
  if let Some(level) = user.all.get(&resource_type).cloned() {
    if level > PermissionLevel::None {
      return Ok(None);
    }
  }
  // Check user groups 'all' on variant
  let groups = get_user_user_groups(&user.id).await?;
  for group in &groups {
    if let Some(level) = group.all.get(&resource_type).cloned() {
      if level > PermissionLevel::None {
        return Ok(None);
      }
    }
  }
  // Run both db queries concurrently.
  let (base, perms) = tokio::try_join!(
    // Get any resources with non-none base permission,
    find_collect(
      T::coll(),
      doc! { "base_permission": { "$exists": true, "$ne": "None" } },
      None,
    )
    .map(|res| res.with_context(|| format!(
      "failed to query {resource_type} on db"
    ))),
    // And any ids using the permissions table
    find_collect(
      &db_client().permissions,
      doc! {
        "$or": user_target_query(&user.id, &groups)?,
        "resource_target.type": resource_type.as_ref(),
        "level": { "$exists": true, "$ne": "None" }
      },
      None,
    )
    .map(|res| res.context("failed to query permissions on db"))
  )?;
  // Add specific ids
  let ids = perms
    .into_iter()
    .map(|p| p.resource_target.extract_variant_id().1.to_string())
    // Chain in the ones with non-None base permissions
    .chain(base.into_iter().map(|res| res.id))
    // collect into hashset first to remove any duplicates
    .collect::<HashSet<_>>();
  Ok(Some(ids.into_iter().collect()))
}
#[instrument(level = "debug")]
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_for_user_using_document::<T>(filters, user, permissions).await
list_for_user_using_document::<T>(filters, user).await
}
#[instrument(level = "debug")]
@@ -273,15 +413,10 @@ pub async fn list_for_user_using_pattern<T: KomodoResource>(
pattern: &str,
query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
let list = list_full_for_user_using_pattern::<T>(
pattern,
query,
user,
permissions,
all_tags,
pattern, query, user, all_tags,
)
.await?
.into_iter()
@@ -293,7 +428,6 @@ pub async fn list_for_user_using_pattern<T: KomodoResource>(
pub async fn list_for_user_using_document<T: KomodoResource>(
filters: Document,
user: &User,
permissions: PermissionLevelAndSpecifics,
) -> anyhow::Result<Vec<T::ListItem>> {
let list = list_full_for_user_using_document::<T>(filters, user)
.await?
@@ -315,12 +449,10 @@ pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
pattern: &str,
query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
let resources =
list_full_for_user::<T>(query, user, permissions, all_tags)
.await?;
list_full_for_user::<T>(query, user, all_tags).await?;
let patterns = parse_string_list(pattern);
let mut names = HashSet::<String>::new();
@@ -357,7 +489,6 @@ pub async fn list_full_for_user_using_pattern<T: KomodoResource>(
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
permissions: PermissionLevelAndSpecifics,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
validate_resource_query_tags(&mut query, all_tags)?;
@@ -459,7 +590,7 @@ pub async fn create<T: KomodoResource>(
return Err(anyhow!("Must provide non-empty name for resource."));
}
let name = T::validated_name(name);
let name = to_komodo_name(name);
if ObjectId::from_str(&name).is_ok() {
return Err(anyhow!("valid ObjectIds cannot be used as names."));
@@ -467,16 +598,11 @@ pub async fn create<T: KomodoResource>(
// Ensure an existing resource with same name doesn't already exist
// The database indexing also ensures this but doesn't give a good error message.
if list_full_for_user::<T>(
Default::default(),
system_user(),
PermissionLevel::Read.into(),
&[],
)
.await
.context("Failed to list all resources for duplicate name check")?
.into_iter()
.any(|r| r.name == name)
if list_full_for_user::<T>(Default::default(), system_user(), &[])
.await
.context("Failed to list all resources for duplicate name check")?
.into_iter()
.any(|r| r.name == name)
{
return Err(anyhow!("Must provide unique name for resource."));
}
@@ -493,7 +619,7 @@ pub async fn create<T: KomodoResource>(
tags: Default::default(),
config: config.into(),
info: T::default_info().await?,
base_permission: PermissionLevel::None.into(),
base_permission: PermissionLevel::None,
};
let resource_id = T::coll()
@@ -510,13 +636,8 @@ pub async fn create<T: KomodoResource>(
let resource = get::<T>(&resource_id).await?;
let target = resource_target::<T>(resource_id);
create_permission(
user,
target.clone(),
PermissionLevel::Write,
T::creator_specific_permissions(),
)
.await;
create_permission(user, target.clone(), PermissionLevel::Write)
.await;
let mut update = make_update(target, T::create_operation(), user);
update.start_ts = start_ts;
@@ -555,7 +676,7 @@ pub async fn update<T: KomodoResource>(
let resource = get_check_permissions::<T>(
id_or_name,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -572,17 +693,13 @@ pub async fn update<T: KomodoResource>(
return Ok(resource);
}
// Leave this Result unhandled for now
let prev_toml = ExportResourcesToToml {
targets: vec![T::resource_target(&resource.id)],
..Default::default()
let mut diff_log = String::from("diff");
for FieldDiff { field, from, to } in diff.iter_field_diffs() {
diff_log.push_str(&format!(
"\n\n<span class=\"text-muted-foreground\">field</span>: '{field}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-700 dark:text-red-400\">{from}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-700 dark:text-green-400\">{to}</span>",
));
}
.resolve(&ReadArgs {
user: system_user().to_owned(),
})
.await
.map_err(|e| e.error)
.context("Failed to export resource toml before update");
// This minimizes the update against the existing config
let config: T::PartialConfig = diff.into();
@@ -598,35 +715,13 @@ pub async fn update<T: KomodoResource>(
.await
.context("failed to update resource on database")?;
let curr_toml = ExportResourcesToToml {
targets: vec![T::resource_target(&id)],
..Default::default()
}
.resolve(&ReadArgs {
user: system_user().to_owned(),
})
.await
.map_err(|e| e.error)
.context("Failed to export resource toml after update");
let mut update = make_update(
resource_target::<T>(id),
T::update_operation(),
user,
);
match prev_toml {
Ok(res) => update.prev_toml = res.toml,
Err(e) => update
// These logs are pushed with success == true, so user still knows the update was succesful.
.push_simple_log("Failed export", format_serror(&e.into())),
}
match curr_toml {
Ok(res) => update.current_toml = res.toml,
Err(e) => update
// These logs are pushed with success == true, so user still knows the update was succesful.
.push_simple_log("Failed export", format_serror(&e.into())),
}
update.push_simple_log("update config", diff_log);
let updated = get::<T>(id_or_name).await?;
@@ -651,6 +746,9 @@ fn resource_target<T: KomodoResource>(id: String) -> ResourceTarget {
ResourceTargetVariant::Repo => ResourceTarget::Repo(id),
ResourceTargetVariant::Alerter => ResourceTarget::Alerter(id),
ResourceTargetVariant::Procedure => ResourceTarget::Procedure(id),
ResourceTargetVariant::ServerTemplate => {
ResourceTarget::ServerTemplate(id)
}
ResourceTargetVariant::ResourceSync => {
ResourceTarget::ResourceSync(id)
}
@@ -667,7 +765,7 @@ pub async fn update_description<T: KomodoResource>(
get_check_permissions::<T>(
id_or_name,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
T::coll()
@@ -731,7 +829,7 @@ pub async fn rename<T: KomodoResource>(
let resource = get_check_permissions::<T>(
id_or_name,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -741,7 +839,7 @@ pub async fn rename<T: KomodoResource>(
user,
);
let name = T::validated_name(name);
let name = to_komodo_name(name);
update_one_by_id(
T::coll(),
@@ -785,7 +883,7 @@ pub async fn delete<T: KomodoResource>(
let resource = get_check_permissions::<T>(
id_or_name,
&args.user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
@@ -895,6 +993,9 @@ where
ResourceTarget::Stack(id) => ("recents.Stack", id),
ResourceTarget::Builder(id) => ("recents.Builder", id),
ResourceTarget::Alerter(id) => ("recents.Alerter", id),
ResourceTarget::ServerTemplate(id) => {
("recents.ServerTemplate", id)
}
ResourceTarget::ResourceSync(id) => ("recents.ResourceSync", id),
ResourceTarget::System(_) => return,
};

View File

@@ -4,7 +4,7 @@ use anyhow::{Context, anyhow};
use komodo_client::{
api::execute::Execution,
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
Operation, ResourceTargetVariant,
action::Action,
alerter::Alerter,
build::Build,
@@ -31,9 +31,6 @@ use mungos::{
use crate::{
config::core_config,
schedule::{
cancel_schedule, get_schedule_item_info, update_schedule,
},
state::{action_states, db_client, procedure_state_cache},
};
@@ -49,10 +46,6 @@ impl super::KomodoResource for Procedure {
ResourceTargetVariant::Procedure
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Procedure(id.into())
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().procedures
@@ -62,9 +55,6 @@ impl super::KomodoResource for Procedure {
procedure: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let state = get_procedure_state(&procedure.id).await;
let (next_scheduled_run, schedule_error) = get_schedule_item_info(
&ResourceTarget::Procedure(procedure.id.clone()),
);
ProcedureListItem {
name: procedure.name,
id: procedure.id,
@@ -73,8 +63,6 @@ impl super::KomodoResource for Procedure {
info: ProcedureListItemInfo {
stages: procedure.config.stages.len() as i64,
state,
next_scheduled_run,
schedule_error,
},
}
}
@@ -106,10 +94,9 @@ impl super::KomodoResource for Procedure {
}
async fn post_create(
created: &Resource<Self::Config, Self::Info>,
_created: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
update_schedule(created);
refresh_procedure_state_cache().await;
Ok(())
}
@@ -155,10 +142,9 @@ impl super::KomodoResource for Procedure {
}
async fn post_delete(
resource: &Resource<Self::Config, Self::Info>,
_resource: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
cancel_schedule(&ResourceTarget::Procedure(resource.id.clone()));
Ok(())
}
}
@@ -180,7 +166,7 @@ async fn validate_config(
let procedure = super::get_check_permissions::<Procedure>(
&params.procedure,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
match id {
@@ -204,7 +190,7 @@ async fn validate_config(
let action = super::get_check_permissions::<Action>(
&params.action,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.action = action.id;
@@ -220,7 +206,7 @@ async fn validate_config(
let build = super::get_check_permissions::<Build>(
&params.build,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.build = build.id;
@@ -236,7 +222,7 @@ async fn validate_config(
let build = super::get_check_permissions::<Build>(
&params.build,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.build = build.id;
@@ -246,7 +232,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -263,7 +249,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -273,7 +259,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -283,7 +269,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -293,7 +279,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -303,7 +289,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -313,7 +299,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -323,7 +309,7 @@ async fn validate_config(
super::get_check_permissions::<Deployment>(
&params.deployment,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.deployment = deployment.id;
@@ -339,7 +325,7 @@ async fn validate_config(
let repo = super::get_check_permissions::<Repo>(
&params.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.repo = repo.id;
@@ -355,7 +341,7 @@ async fn validate_config(
let repo = super::get_check_permissions::<Repo>(
&params.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.repo = repo.id;
@@ -371,7 +357,7 @@ async fn validate_config(
let repo = super::get_check_permissions::<Repo>(
&params.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.repo = repo.id;
@@ -387,7 +373,7 @@ async fn validate_config(
let repo = super::get_check_permissions::<Repo>(
&params.repo,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.repo = repo.id;
@@ -396,7 +382,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -405,7 +391,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -414,7 +400,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -423,7 +409,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -432,7 +418,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -441,7 +427,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -450,7 +436,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -459,7 +445,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -468,7 +454,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -477,7 +463,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -486,7 +472,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -495,7 +481,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -504,7 +490,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -513,7 +499,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -522,7 +508,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -531,7 +517,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -540,7 +526,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -549,7 +535,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -558,7 +544,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -567,7 +553,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -576,7 +562,7 @@ async fn validate_config(
let server = super::get_check_permissions::<Server>(
&params.server,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.server = server.id;
@@ -585,7 +571,7 @@ async fn validate_config(
let sync = super::get_check_permissions::<ResourceSync>(
&params.sync,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.sync = sync.id;
@@ -595,7 +581,7 @@ async fn validate_config(
let sync = super::get_check_permissions::<ResourceSync>(
&params.sync,
user,
PermissionLevel::Write.into(),
PermissionLevel::Write,
)
.await?;
params.sync = sync.id;
@@ -604,7 +590,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -620,7 +606,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -636,23 +622,16 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
}
Execution::BatchPullStack(_params) => {
if !user.admin {
return Err(anyhow!(
"Non admin user cannot configure Batch executions"
));
}
}
Execution::StartStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -661,7 +640,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -670,7 +649,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -679,7 +658,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -688,7 +667,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -697,7 +676,7 @@ async fn validate_config(
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
@@ -713,7 +692,7 @@ async fn validate_config(
let alerter = super::get_check_permissions::<Alerter>(
&params.alerter,
user,
PermissionLevel::Execute.into(),
PermissionLevel::Execute,
)
.await?;
params.alerter = alerter.id;

View File

@@ -3,7 +3,7 @@ use std::time::Duration;
use anyhow::Context;
use formatting::format_serror;
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
Operation, ResourceTargetVariant,
builder::Builder,
permission::PermissionLevel,
repo::{
@@ -12,7 +12,7 @@ use komodo_client::entities::{
},
resource::Resource,
server::Server,
to_path_compatible_name,
to_komodo_name,
update::Update,
user::User,
};
@@ -44,14 +44,6 @@ impl super::KomodoResource for Repo {
ResourceTargetVariant::Repo
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Repo(id.into())
}
fn validated_name(name: &str) -> String {
to_path_compatible_name(name)
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().repos
@@ -174,11 +166,10 @@ impl super::KomodoResource for Repo {
match periphery
.request(DeleteRepo {
name: if repo.config.path.is_empty() {
to_path_compatible_name(&repo.name)
to_komodo_name(&repo.name)
} else {
repo.config.path.clone()
},
is_build: false,
})
.await
{
@@ -230,7 +221,7 @@ async fn validate_config(
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Write,
)
.await
.context("Cannot attach Repo to this Server")?;
@@ -242,7 +233,7 @@ async fn validate_config(
let builder = super::get_check_permissions::<Builder>(
builder_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Read,
)
.await
.context("Cannot attach Repo to this Builder")?;

View File

@@ -1,8 +1,6 @@
use anyhow::Context;
use indexmap::IndexSet;
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant, komodo_timestamp,
permission::SpecificPermission,
Operation, ResourceTargetVariant, komodo_timestamp,
resource::Resource,
server::{
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
@@ -15,7 +13,6 @@ use mungos::mongodb::{Collection, bson::doc};
use crate::{
config::core_config,
helpers::query::get_system_info,
monitor::update_cache_for_server,
state::{action_states, db_client, server_status_cache},
};
@@ -32,22 +29,6 @@ impl super::KomodoResource for Server {
ResourceTargetVariant::Server
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Server(id.into())
}
fn creator_specific_permissions() -> IndexSet<SpecificPermission> {
[
SpecificPermission::Terminal,
SpecificPermission::Inspect,
SpecificPermission::Attach,
SpecificPermission::Logs,
SpecificPermission::Processes,
]
.into_iter()
.collect()
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().servers
@@ -57,11 +38,6 @@ impl super::KomodoResource for Server {
server: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let status = server_status_cache().get(&server.id).await;
let (terminals_disabled, container_exec_disabled) =
get_system_info(&server)
.await
.map(|i| (i.terminals_disabled, i.container_exec_disabled))
.unwrap_or((true, true));
ServerListItem {
name: server.name,
id: server.id,
@@ -77,8 +53,6 @@ impl super::KomodoResource for Server {
send_cpu_alerts: server.config.send_cpu_alerts,
send_mem_alerts: server.config.send_mem_alerts,
send_disk_alerts: server.config.send_disk_alerts,
terminals_disabled,
container_exec_disabled,
},
}
}

View File

@@ -0,0 +1,145 @@
use komodo_client::entities::{
MergePartial, Operation, ResourceTargetVariant,
resource::Resource,
server_template::{
PartialServerTemplateConfig, ServerTemplate,
ServerTemplateConfig, ServerTemplateConfigDiff,
ServerTemplateConfigVariant, ServerTemplateListItem,
ServerTemplateListItemInfo, ServerTemplateQuerySpecifics,
},
update::Update,
user::User,
};
use mungos::mongodb::{
Collection,
bson::{Document, to_document},
};
use crate::state::db_client;
impl super::KomodoResource for ServerTemplate {
type Config = ServerTemplateConfig;
type PartialConfig = PartialServerTemplateConfig;
type ConfigDiff = ServerTemplateConfigDiff;
type Info = ();
type ListItem = ServerTemplateListItem;
type QuerySpecifics = ServerTemplateQuerySpecifics;
fn resource_type() -> ResourceTargetVariant {
ResourceTargetVariant::ServerTemplate
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().server_templates
}
async fn to_list_item(
server_template: Resource<Self::Config, Self::Info>,
) -> Self::ListItem {
let (template_type, instance_type) = match server_template.config
{
ServerTemplateConfig::Aws(config) => (
ServerTemplateConfigVariant::Aws.to_string(),
Some(config.instance_type),
),
ServerTemplateConfig::Hetzner(config) => (
ServerTemplateConfigVariant::Hetzner.to_string(),
Some(config.server_type.as_ref().to_string()),
),
};
ServerTemplateListItem {
name: server_template.name,
id: server_template.id,
tags: server_template.tags,
resource_type: ResourceTargetVariant::ServerTemplate,
info: ServerTemplateListItemInfo {
provider: template_type.to_string(),
instance_type,
},
}
}
async fn busy(_id: &String) -> anyhow::Result<bool> {
Ok(false)
}
// CREATE
fn create_operation() -> Operation {
Operation::CreateServerTemplate
}
fn user_can_create(user: &User) -> bool {
user.admin
}
async fn validate_create_config(
_config: &mut Self::PartialConfig,
_user: &User,
) -> anyhow::Result<()> {
Ok(())
}
async fn post_create(
_created: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
Ok(())
}
// UPDATE
fn update_operation() -> Operation {
Operation::UpdateServerTemplate
}
async fn validate_update_config(
_id: &str,
_config: &mut Self::PartialConfig,
_user: &User,
) -> anyhow::Result<()> {
Ok(())
}
fn update_document(
original: Resource<Self::Config, Self::Info>,
config: Self::PartialConfig,
) -> Result<Document, mungos::mongodb::bson::ser::Error> {
let config = original.config.merge_partial(config);
to_document(&config)
}
async fn post_update(
_updated: &Self,
_update: &mut Update,
) -> anyhow::Result<()> {
Ok(())
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameServerTemplate
}
// DELETE
fn delete_operation() -> Operation {
Operation::DeleteServerTemplate
}
async fn pre_delete(
_resource: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
Ok(())
}
async fn post_delete(
_resource: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
Ok(())
}
}

View File

@@ -1,11 +1,10 @@
use anyhow::Context;
use formatting::format_serror;
use indexmap::IndexSet;
use komodo_client::{
api::write::RefreshStackCache,
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
permission::{PermissionLevel, SpecificPermission},
Operation, ResourceTargetVariant,
permission::PermissionLevel,
resource::Resource,
server::Server,
stack::{
@@ -13,7 +12,6 @@ use komodo_client::{
StackInfo, StackListItem, StackListItemInfo,
StackQuerySpecifics, StackServiceWithUpdate, StackState,
},
to_docker_compatible_name,
update::Update,
user::{User, stack_user},
},
@@ -46,30 +44,6 @@ impl super::KomodoResource for Stack {
ResourceTargetVariant::Stack
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::Stack(id.into())
}
fn validated_name(name: &str) -> String {
to_docker_compatible_name(name)
}
fn creator_specific_permissions() -> IndexSet<SpecificPermission> {
[
SpecificPermission::Inspect,
SpecificPermission::Logs,
SpecificPermission::Terminal,
]
.into_iter()
.collect()
}
fn inherit_specific_permissions_from(
_self: &Resource<Self::Config, Self::Info>,
) -> Option<ResourceTarget> {
ResourceTarget::Server(_self.config.server_id.clone()).into()
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().stacks
@@ -188,7 +162,7 @@ impl super::KomodoResource for Stack {
{
update.push_error_log(
"Refresh stack cache",
format_serror(&e.error.context("The stack cache has failed to refresh. This may be due to a misconfiguration of the Stack").into())
format_serror(&e.error.context("The stack cache has failed to refresh. This is likely due to a misconfiguration of the Stack").into())
);
};
if created.config.server_id.is_empty() {
@@ -336,7 +310,7 @@ async fn validate_config(
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Read.attach(),
PermissionLevel::Write,
)
.await
.context("Cannot attach stack to this Server")?;

View File

@@ -3,8 +3,7 @@ use formatting::format_serror;
use komodo_client::{
api::write::RefreshResourceSyncPending,
entities::{
Operation, ResourceTarget, ResourceTargetVariant,
komodo_timestamp,
Operation, ResourceTargetVariant, komodo_timestamp,
resource::Resource,
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncConfig,
@@ -37,10 +36,6 @@ impl super::KomodoResource for ResourceSync {
ResourceTargetVariant::ResourceSync
}
fn resource_target(id: impl Into<String>) -> ResourceTarget {
ResourceTarget::ResourceSync(id.into())
}
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().resource_syncs

View File

@@ -1,378 +0,0 @@
use std::{
collections::HashMap,
sync::{OnceLock, RwLock},
};
use anyhow::{Context, anyhow};
use async_timing_util::Timelength;
use chrono::Local;
use formatting::format_serror;
use komodo_client::{
api::execute::{RunAction, RunProcedure},
entities::{
ResourceTarget, ResourceTargetVariant, ScheduleFormat,
action::Action,
alert::{Alert, AlertData, SeverityLevel},
komodo_timestamp,
procedure::Procedure,
user::{action_user, procedure_user},
},
};
use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
alert::send_alerts,
api::execute::{ExecuteArgs, ExecuteRequest},
helpers::update::init_execution_update,
state::db_client,
};
pub fn spawn_schedule_executor() {
// Executor thread
tokio::spawn(async move {
loop {
let current_time = async_timing_util::wait_until_timelength(
Timelength::OneSecond,
0,
)
.await as i64;
let mut lock = schedules().write().unwrap();
let drained = lock.drain().collect::<Vec<_>>();
for (target, next_run) in drained {
match next_run {
Ok(next_run_time) if current_time >= next_run_time => {
tokio::spawn(async move {
match &target {
ResourceTarget::Action(id) => {
let action = match crate::resource::get::<Action>(
id,
)
.await
{
Ok(action) => action,
Err(e) => {
warn!(
"Scheduled action run on {id} failed | failed to get procedure | {e:?}"
);
return;
}
};
let request =
ExecuteRequest::RunAction(RunAction {
action: id.clone(),
});
let update = match init_execution_update(
&request,
action_user(),
)
.await
{
Ok(update) => update,
Err(e) => {
error!(
"Failed to make update for scheduled action run, action {id} is not being run | {e:#}"
);
return;
}
};
let ExecuteRequest::RunAction(request) = request
else {
unreachable!()
};
if let Err(e) = request
.resolve(&ExecuteArgs {
user: action_user().to_owned(),
update,
})
.await
{
warn!(
"Scheduled action run on {id} failed | {e:?}"
);
}
update_schedule(&action);
if action.config.schedule_alert {
let alert = Alert {
id: Default::default(),
target,
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Ok,
data: AlertData::ScheduleRun {
resource_type: ResourceTargetVariant::Action,
id: action.id,
name: action.name,
},
};
send_alerts(&[alert]).await
}
}
ResourceTarget::Procedure(id) => {
let procedure = match crate::resource::get::<
Procedure,
>(id)
.await
{
Ok(procedure) => procedure,
Err(e) => {
warn!(
"Scheduled procedure run on {id} failed | failed to get procedure | {e:?}"
);
return;
}
};
let request =
ExecuteRequest::RunProcedure(RunProcedure {
procedure: id.clone(),
});
let update = match init_execution_update(
&request,
procedure_user(),
)
.await
{
Ok(update) => update,
Err(e) => {
error!(
"Failed to make update for scheduled procedure run, procedure {id} is not being run | {e:#}"
);
return;
}
};
let ExecuteRequest::RunProcedure(request) = request
else {
unreachable!()
};
if let Err(e) = request
.resolve(&ExecuteArgs {
user: procedure_user().to_owned(),
update,
})
.await
{
warn!(
"Scheduled procedure run on {id} failed | {e:?}"
);
}
update_schedule(&procedure);
if procedure.config.schedule_alert {
let alert = Alert {
id: Default::default(),
target,
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Ok,
data: AlertData::ScheduleRun {
resource_type:
ResourceTargetVariant::Procedure,
id: procedure.id,
name: procedure.name,
},
};
send_alerts(&[alert]).await
}
}
_ => unreachable!(),
}
});
}
other => {
lock.insert(target, other);
continue;
}
};
}
}
});
// Updater thread
tokio::spawn(async move {
update_schedules().await;
loop {
async_timing_util::wait_until_timelength(
Timelength::FiveMinutes,
500,
)
.await;
update_schedules().await
}
});
}
type UnixTimestampMs = i64;
type Schedules =
HashMap<ResourceTarget, Result<UnixTimestampMs, String>>;
fn schedules() -> &'static RwLock<Schedules> {
static SCHEDULES: OnceLock<RwLock<Schedules>> = OnceLock::new();
SCHEDULES.get_or_init(Default::default)
}
pub fn get_schedule_item_info(
target: &ResourceTarget,
) -> (Option<i64>, Option<String>) {
match schedules().read().unwrap().get(target) {
Some(Ok(time)) => (Some(*time), None),
Some(Err(e)) => (None, Some(e.clone())),
None => (None, None),
}
}
pub fn cancel_schedule(target: &ResourceTarget) {
schedules().write().unwrap().remove(target);
}
pub async fn update_schedules() {
let (procedures, actions) = tokio::join!(
find_collect(&db_client().procedures, None, None),
find_collect(&db_client().actions, None, None),
);
let procedures = match procedures
.context("failed to get all procedures from db")
{
Ok(procedures) => procedures,
Err(e) => {
error!("failed to get procedures for schedule update | {e:#}");
Vec::new()
}
};
let actions =
match actions.context("failed to get all actions from db") {
Ok(actions) => actions,
Err(e) => {
error!("failed to get actions for schedule update | {e:#}");
Vec::new()
}
};
// clear out any schedules which don't match to existing resources
{
let mut lock = schedules().write().unwrap();
lock.retain(|target, _| match target {
ResourceTarget::Action(id) => {
actions.iter().any(|action| &action.id == id)
}
ResourceTarget::Procedure(id) => {
procedures.iter().any(|procedure| &procedure.id == id)
}
_ => unreachable!(),
});
}
for procedure in procedures {
update_schedule(&procedure);
}
for action in actions {
update_schedule(&action);
}
}
/// Re/spawns the schedule for the given procedure
pub fn update_schedule(schedule: impl HasSchedule) {
// Cancel any existing schedule for the procedure
cancel_schedule(&schedule.target());
if !schedule.enabled() || schedule.schedule().is_empty() {
return;
}
schedules().write().unwrap().insert(
schedule.target(),
find_next_occurrence(schedule)
.map_err(|e| format_serror(&e.into())),
);
}
/// Finds the next run occurence in UTC ms.
fn find_next_occurrence(
schedule: impl HasSchedule,
) -> anyhow::Result<i64> {
let cron = match schedule.format() {
ScheduleFormat::Cron => croner::Cron::new(schedule.schedule())
.with_seconds_required()
.with_dom_and_dow()
.parse()
.context("Failed to parse schedule CRON")?,
ScheduleFormat::English => {
let cron =
english_to_cron::str_cron_syntax(schedule.schedule())
.map_err(|e| {
anyhow!("Failed to parse english to cron | {e:?}")
})?
.split(' ')
// croner does not accept year
.take(6)
.collect::<Vec<_>>()
.join(" ");
croner::Cron::new(&cron)
.with_seconds_required()
.with_dom_and_dow()
.parse()
.with_context(|| {
format!("Failed to parse schedule CRON: {cron}")
})?
}
};
let next = if schedule.timezone().is_empty() {
let tz_time = chrono::Local::now().with_timezone(&Local);
cron
.find_next_occurrence(&tz_time, false)
.context("Failed to find next run time")?
.timestamp_millis()
} else {
let tz: chrono_tz::Tz = schedule
.timezone()
.parse()
.context("Failed to parse schedule timezone")?;
let tz_time = chrono::Local::now().with_timezone(&tz);
cron
.find_next_occurrence(&tz_time, false)
.context("Failed to find next run time")?
.timestamp_millis()
};
Ok(next)
}
pub trait HasSchedule {
fn target(&self) -> ResourceTarget;
fn enabled(&self) -> bool;
fn format(&self) -> ScheduleFormat;
fn schedule(&self) -> &str;
fn timezone(&self) -> &str;
}
impl HasSchedule for &Procedure {
fn target(&self) -> ResourceTarget {
ResourceTarget::Procedure(self.id.clone())
}
fn enabled(&self) -> bool {
self.config.schedule_enabled
}
fn format(&self) -> ScheduleFormat {
self.config.schedule_format
}
fn schedule(&self) -> &str {
&self.config.schedule
}
fn timezone(&self) -> &str {
&self.config.schedule_timezone
}
}
impl HasSchedule for &Action {
fn target(&self) -> ResourceTarget {
ResourceTarget::Action(self.id.clone())
}
fn enabled(&self) -> bool {
self.config.schedule_enabled
}
fn format(&self) -> ScheduleFormat {
self.config.schedule_format
}
fn schedule(&self) -> &str {
&self.config.schedule
}
fn timezone(&self) -> &str {
&self.config.schedule_timezone
}
}

View File

@@ -36,13 +36,9 @@ pub async fn execute_compose<T: ExecuteCompose>(
mut update: Update,
extras: T::Extras,
) -> anyhow::Result<Update> {
let (stack, server) = get_stack_and_server(
stack,
user,
PermissionLevel::Execute.into(),
true,
)
.await?;
let (stack, server) =
get_stack_and_server(stack, user, PermissionLevel::Execute, true)
.await?;
// get the action state for the stack (or insert default).
let action_state =

View File

@@ -1,16 +1,13 @@
use anyhow::{Context, anyhow};
use komodo_client::entities::{
permission::PermissionLevelAndSpecifics,
permission::PermissionLevel,
server::{Server, ServerState},
stack::Stack,
user::User,
};
use regex::Regex;
use crate::{
helpers::query::get_server_with_state,
permission::get_check_permissions,
};
use crate::{helpers::query::get_server_with_state, resource};
pub mod execute;
pub mod remote;
@@ -19,11 +16,15 @@ pub mod services;
pub async fn get_stack_and_server(
stack: &str,
user: &User,
permissions: PermissionLevelAndSpecifics,
permission_level: PermissionLevel,
block_if_server_unreachable: bool,
) -> anyhow::Result<(Stack, Server)> {
let stack =
get_check_permissions::<Stack>(stack, user, permissions).await?;
let stack = resource::get_check_permissions::<Stack>(
stack,
user,
permission_level,
)
.await?;
if stack.config.server_id.is_empty() {
return Err(anyhow!("Stack has no server configured"));

View File

@@ -26,7 +26,7 @@ pub async fn get_repo_compose_contents(
let (repo_path, _logs, hash, message) =
ensure_remote_repo(clone_args)
.await
.context("Failed to clone stack repo")?;
.context("failed to clone stack repo")?;
let run_directory = repo_path.join(&stack.config.run_directory);
// This will remove any intermediate '/./' which can be a problem for some OS.
@@ -44,7 +44,7 @@ pub async fn get_repo_compose_contents(
}
// If file does not exist, will show up in err case so the log is handled
match fs::read_to_string(&file_path).with_context(|| {
format!("Failed to read file contents from {file_path:?}")
format!("failed to read file contents from {file_path:?}")
}) {
Ok(contents) => successful.push(FileContents {
path: path.to_string(),
@@ -102,6 +102,6 @@ pub async fn ensure_remote_repo(
&[],
)
.await
.context("Failed to clone stack repo")
.context("failed to clone stack repo")
.map(|res| (repo_path, res.logs, res.hash, res.message))
}

View File

@@ -8,15 +8,18 @@ pub fn extract_services_from_stack(
stack: &Stack,
) -> Vec<StackServiceNames> {
if let Some(mut services) = stack.info.deployed_services.clone() {
for service in services.iter_mut().filter(|s| s.image.is_empty())
{
service.image = stack
.info
.latest_services
.iter()
.find(|s| s.service_name == service.service_name)
.map(|s| s.image.clone())
.unwrap_or_default();
if services.iter().any(|service| service.image.is_empty()) {
for service in
services.iter_mut().filter(|s| s.image.is_empty())
{
service.image = stack
.info
.latest_services
.iter()
.find(|s| s.service_name == service.service_name)
.map(|s| s.image.clone())
.unwrap_or_default();
}
}
services
} else {

View File

@@ -1,228 +0,0 @@
use std::str::FromStr;
use futures::future::join_all;
use komodo_client::{
api::write::{CreateBuilder, CreateServer},
entities::{
ResourceTarget,
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
komodo_timestamp,
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::Log,
user::system_user,
},
};
use mungos::{
find::find_collect,
mongodb::bson::{Document, doc, oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
api::write::WriteArgs, config::core_config, helpers::random_string,
resource, state::db_client,
};
/// This function should be run on startup,
/// after the db client has been initialized
pub async fn on_startup() {
tokio::join!(
in_progress_update_cleanup(),
open_alert_cleanup(),
ensure_first_server_and_builder(),
clean_up_server_templates(),
);
}
async fn in_progress_update_cleanup() {
let log = Log::error(
"Komodo shutdown",
String::from(
"Komodo shutdown during execution. If this is a build, the builder may not have been terminated.",
),
);
// This static log won't fail to serialize, unwrap ok.
let log = to_document(&log).unwrap();
if let Err(e) = db_client()
.updates
.update_many(
doc! { "status": "InProgress" },
doc! {
"$set": {
"status": "Complete",
"success": false,
},
"$push": {
"logs": log
}
},
)
.await
{
error!("failed to cleanup in progress updates on startup | {e:#}")
}
}
/// Run on startup, ensure open alerts pointing to invalid resources are closed.
async fn open_alert_cleanup() {
let db = db_client();
let Ok(alerts) =
find_collect(&db.alerts, doc! { "resolved": false }, None)
.await
.inspect_err(|e| {
error!(
"failed to list all alerts for startup open alert cleanup | {e:?}"
)
})
else {
return;
};
let futures = alerts.into_iter().map(|alert| async move {
match alert.target {
ResourceTarget::Server(id) => {
resource::get::<Server>(&id)
.await
.is_err()
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
}
ResourceTarget::ResourceSync(id) => {
resource::get::<ResourceSync>(&id)
.await
.is_err()
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
}
// No other resources should have open alerts.
_ => ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok(),
}
});
let to_update_ids = join_all(futures)
.await
.into_iter()
.flatten()
.collect::<Vec<_>>();
if let Err(e) = db
.alerts
.update_many(
doc! { "_id": { "$in": to_update_ids } },
doc! { "$set": {
"resolved": true,
"resolved_ts": komodo_timestamp()
} },
)
.await
{
error!(
"failed to clean up invalid open alerts on startup | {e:#}"
)
}
}
/// Ensures a default server / builder exists with the defined address
async fn ensure_first_server_and_builder() {
let first_server = &core_config().first_server;
if first_server.is_empty() {
return;
}
let db = db_client();
let Ok(server) = db
.servers
.find_one(Document::new())
.await
.inspect_err(|e| error!("Failed to initialize 'first_server'. Failed to query db. {e:?}"))
else {
return;
};
let server = if let Some(server) = server {
server
} else {
match (CreateServer {
name: format!("server-{}", random_string(5)),
config: PartialServerConfig {
address: Some(first_server.to_string()),
enabled: Some(true),
..Default::default()
},
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await
{
Ok(server) => server,
Err(e) => {
error!(
"Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
e.error
);
return;
}
}
};
let Ok(None) = db.builders
.find_one(Document::new()).await
.inspect_err(|e| error!("Failed to initialize 'first_builder' | Failed to query db | {e:?}")) else {
return;
};
if let Err(e) = (CreateBuilder {
name: String::from("local"),
config: PartialBuilderConfig::Server(
PartialServerBuilderConfig {
server_id: Some(server.id),
},
),
})
.resolve(&WriteArgs {
user: system_user().to_owned(),
})
.await
{
error!(
"Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}",
e.error
);
}
}
/// v1.17.5 removes the ServerTemplate resource.
/// References to this resource type need to be cleaned up
/// to avoid type errors reading from the database.
async fn clean_up_server_templates() {
let db = db_client();
tokio::join!(
async {
db.permissions
.delete_many(doc! {
"resource_target.type": "ServerTemplate",
})
.await
.expect(
"Failed to clean up server template permissions on db",
);
},
async {
db.updates
.delete_many(doc! { "target.type": "ServerTemplate" })
.await
.expect("Failed to clean up server template updates on db");
},
async {
db.users
.update_many(
Document::new(),
doc! { "$unset": { "recents.ServerTemplate": 1, "all.ServerTemplate": 1 } }
)
.await
.expect("Failed to clean up server template updates on db");
},
async {
db.user_groups
.update_many(
Document::new(),
doc! { "$unset": { "all.ServerTemplate": 1 } },
)
.await
.expect("Failed to clean up server template updates on db");
},
);
}

View File

@@ -424,7 +424,7 @@ fn build_cache_for_deployment<'a>(
let deployed_version = status
.container
.as_ref()
.and_then(|c| c.image.as_ref()?.split(':').next_back())
.and_then(|c| c.image.as_ref()?.split(':').last())
.unwrap_or("0.0.0");
match build_version_cache.get(build_id) {
Some(version) if deployed_version != version => {

View File

@@ -270,6 +270,9 @@ pub fn extend_resources(
resources
.builders
.extend(filter_by_tag(more.builders, match_tags));
resources
.server_templates
.extend(filter_by_tag(more.server_templates, match_tags));
resources
.resource_syncs
.extend(filter_by_tag(more.resource_syncs, match_tags));

View File

@@ -2,7 +2,7 @@ use std::{collections::HashMap, str::FromStr};
use anyhow::anyhow;
use komodo_client::entities::{
ResourceTargetVariant,
ResourceTarget, ResourceTargetVariant,
action::Action,
alerter::Alerter,
build::Build,
@@ -11,6 +11,7 @@ use komodo_client::entities::{
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
tag::Tag,
@@ -54,6 +55,8 @@ pub struct ToUpdateItem<T: Default> {
}
pub trait ResourceSyncTrait: ToToml + Sized {
fn resource_target(id: String) -> ResourceTarget;
/// To exclude resource syncs with "file_contents" (they aren't compatible)
fn include_resource(
name: &String,
@@ -165,6 +168,7 @@ pub struct AllResourcesById {
pub actions: HashMap<String, Action>,
pub builders: HashMap<String, Builder>,
pub alerters: HashMap<String, Alerter>,
pub templates: HashMap<String, ServerTemplate>,
pub syncs: HashMap<String, ResourceSync>,
}
@@ -208,6 +212,10 @@ impl AllResourcesById {
id_to_tags, match_tags,
)
.await?,
templates: crate::resource::get_id_to_resource_map::<
ServerTemplate,
>(id_to_tags, match_tags)
.await?,
syncs: crate::resource::get_id_to_resource_map::<ResourceSync>(
id_to_tags, match_tags,
)

Some files were not shown because too many files have changed in this diff Show More