Compare commits

..

108 Commits

Author SHA1 Message Date
mbecker20
f458106263 batch Build builds 2025-03-13 00:55:29 -04:00
mbecker20
f022e83414 use Tooltip component instead of HoverCard for mobile compatibility 2025-03-13 00:01:26 -04:00
mbecker20
93ccc1ce7f update some deps 2025-03-12 23:39:39 -04:00
mbecker20
d69b7a74e7 isolate stacks / deployments with pending updates 2025-03-12 22:57:34 -04:00
wlatic
96cfaf3355 Update config.tsx (#358) 2025-03-12 22:21:06 -04:00
komodo
b7587e5426 soften tag opacity a bit 2025-03-12 22:21:06 -04:00
komodo
ab874211ed default new tag colors to grey 2025-03-12 22:21:06 -04:00
komodo
7094c4e3c5 fix sync summary count ok 2025-03-12 22:21:06 -04:00
komodo
386d463a0a colored tags 2025-03-12 22:21:05 -04:00
komodo
65fa969686 improve variable value table overflow 2025-03-12 22:21:05 -04:00
komodo
92099f311f use jsonwebtoken 2025-03-12 22:21:05 -04:00
komodo
9186d5fd50 resource sync toggle resource / variable / user group inclusion independently 2025-03-12 22:21:05 -04:00
komodo
54e766f48b update deps 2025-03-12 22:21:05 -04:00
komodo
a1dd895b19 add KOMODO_LOCK_LOGIN_CREDENTIALS_FOR in config doc 2025-03-12 22:21:05 -04:00
komodo
3f767ed42e cycle the oidc client on interval to ensure up to date JWKs 2025-03-12 22:21:05 -04:00
komodo
4c14c33339 update docs on OIDC and client secret 2025-03-12 22:21:05 -04:00
komodo
ac43572acb OIDC: Support PKCE auth (secret optional) 2025-03-12 22:21:05 -04:00
komodo
c5451bed8e use png in topbar logo, svg quality sometimes bad 2025-03-12 22:21:05 -04:00
komodo
8f568ea8d1 update .devcontainer / dev docs for updated runfile 2025-03-12 22:21:05 -04:00
komodo
dc1062bb57 rename test.compose.yaml to dev.compose.yaml, and update runfile 2025-03-12 22:21:05 -04:00
komodo
a12f21ca7a rust 2024 and fmt 2025-03-12 22:21:05 -04:00
komodo
a0e5a86c89 add update_available query parameter to filter for only stacks /deployments with available update 2025-03-12 22:21:05 -04:00
komodo
2f9cf82cee Fix actions when core on https 2025-03-12 22:21:05 -04:00
komodo
a118696d54 add yarn install to runfile 2025-03-12 22:21:05 -04:00
mbecker20
d6abda063a stack edits on localstorage and show last deployed config 2025-03-12 22:21:05 -04:00
mbecker20
8fb4bea790 store sync edits on localstorage 2025-03-12 22:21:05 -04:00
mbecker20
8cc1d0106a rust 1.85 2025-03-12 22:21:05 -04:00
mbecker20
c0b213dfd4 clean up service_args 2025-03-12 22:21:05 -04:00
mbecker20
5ad00a41ca dev-5 fix the stack service executions 2025-03-12 22:21:05 -04:00
mbecker20
c91e107245 auto update all service deploy option 2025-03-12 22:21:05 -04:00
mbecker20
66a80c1262 Stacks executions take list of services -- Auto update only redeploys services with update 2025-03-12 22:21:05 -04:00
mbecker20
dd0d99dabc resource sync only add escaping on toml between the """ 2025-03-12 22:21:05 -04:00
mbecker20
13a0c5a2fa dev-4 2025-03-12 22:21:05 -04:00
mbecker20
177ce9dd45 update openidconnect dependency, and use reqwest rustls-tls-native-roots 2025-03-12 22:21:05 -04:00
mbecker20
d09a864453 update most deps 2025-03-12 22:21:05 -04:00
mbecker20
29be281f11 don't prune images if server not enabled 2025-03-12 22:21:05 -04:00
mbecker20
adb1a49305 make sure parent directories exist whenever writing files 2025-03-12 22:21:05 -04:00
mbecker20
014d3b2c13 ResourceSync state resolution refinement 2025-03-12 22:21:05 -04:00
mbecker20
42b1f7b6c8 revert to login screen whenever the call to check login fails 2025-03-12 22:21:05 -04:00
mbecker20
6ee707576b lock certain users username / password, prevent demo creds from being changed. 2025-03-12 22:21:05 -04:00
mbecker20
999ad9a4ce fix rand 2025-03-12 22:21:05 -04:00
mbecker20
94afb432f3 fix all clippy lints 2025-03-12 22:21:05 -04:00
mbecker20
48e871d400 standardize running commands with interpolation / output sanitizations 2025-03-12 22:21:05 -04:00
mbecker20
723853e92d Improve resource sync Execute / Pending view selector 2025-03-12 22:21:05 -04:00
mbecker20
24b2a8ab75 ResourceSync: pending view toggle between "Execute" vs "Commit" sync direction 2025-03-12 22:21:05 -04:00
mbecker20
d541c4c202 set branch on git init folder 2025-03-12 22:21:05 -04:00
mbecker20
a09ced896f init sync file new repo 2025-03-12 22:21:05 -04:00
mbecker20
d0890436c3 Stack: Fix git repo new compose file initialization 2025-03-12 22:21:05 -04:00
mbecker20
54447fe56b show provider usernames from config file 2025-03-12 22:21:05 -04:00
mbecker20
625295d50b filters wrap 2025-03-12 22:21:05 -04:00
mbecker20
e23e37ac92 give server stat charts labels 2025-03-12 22:21:05 -04:00
mbecker20
d4e058a532 improve WriteComposeContentsToHost instrument fields 2025-03-12 22:21:05 -04:00
mbecker20
3890a287d1 ServerTemplate description 2025-03-12 22:21:05 -04:00
mbecker20
e169bbbd31 dev-3 2025-03-12 22:21:05 -04:00
mbecker20
d2cb543c76 use komodo_client.subscribe_to_update_websocket, and click indicator to reconnect 2025-03-12 22:21:05 -04:00
mbecker20
59da3812a9 Fix unclear ComposePull log re #244 2025-03-12 22:21:05 -04:00
mbecker20
ce10cbe684 improve pull to git init on existing folder without .git 2025-03-12 22:21:05 -04:00
unsync
dd901f7369 feature: improve tables quick actions on mobile (#312)
* feature: improve tables quick actions on mobile

* review: fix gap4

* review: use flex-wrap
2025-03-12 22:21:05 -04:00
mbecker20
7908149226 choose which stack services to include in logs 2025-03-12 22:21:05 -04:00
mbecker20
0283930207 fix api name change 2025-03-12 22:21:05 -04:00
mbecker20
487664a25a 1.17.0-dev-2 2025-03-12 22:21:05 -04:00
mbecker20
130ba1f54f Add all services stack log 2025-03-12 22:21:05 -04:00
mbecker20
2758b91e31 improve update indicator style and also put on home screen 2025-03-12 22:21:05 -04:00
mbecker20
d240b5c959 requery alerts more often 2025-03-12 22:21:05 -04:00
mbecker20
f8b8f76569 Fix PullStack re #302 and record docker compose config on stack deploy 2025-03-12 22:21:05 -04:00
mbecker20
d2b27294be improve First Login docs 2025-03-12 22:21:05 -04:00
unsync
c44313a9f1 feature: allow docker image text to overflow in table (#301)
* feature: allow docker image text to overflow in table

* review: use break-words

* wip: revert line break in css file

* feature: update devcontainer node release
2025-03-12 22:21:05 -04:00
mbecker20
1cc967f215 add save button to config bottom 2025-03-12 22:21:05 -04:00
mbecker20
53fcd899a4 add config save button in desktop sidebar navigator 2025-03-12 22:21:05 -04:00
mbecker20
b296971c1a add donate button docsite 2025-03-12 22:21:05 -04:00
mbecker20
6a4c88f8f4 typescript subscribe_to_update_websocket 2025-03-12 22:21:05 -04:00
mbecker20
51992c477d docs new organization 2025-03-12 22:21:05 -04:00
mbecker20
2c98c6ea40 fix new compose images 2025-03-12 22:21:05 -04:00
mbecker20
14658ba722 more legible favicon 2025-03-12 22:21:05 -04:00
mbecker20
192073a12c fix login screen logo 2025-03-12 22:21:05 -04:00
mbecker20
275f204a30 dev-1 2025-03-12 22:21:05 -04:00
mbecker20
a196e1ff7f remove example from cargo toml workspace 2025-03-12 22:21:05 -04:00
mbecker20
3ef5367ed1 mbecker20 -> moghtech 2025-03-12 22:21:05 -04:00
Maxwell Becker
ca2c728bf3 Remove .git from remote_url (#299)
Remove .git from remote_url

Co-authored-by: Deon Marshall <dmarshall@ccp.com.au>
2025-03-12 22:21:05 -04:00
unsync
f0d22642b2 feature: interpolate secrets in custom alerter (#289)
* feature: interpolate secrets in custom alerter

* fix rust warning

* review: sanitize errors

* review: sanitize error message
2025-03-12 22:21:05 -04:00
unsync
5d54876fff feature: add post_deploy command (#288)
* feature: add post_deploy command

* review: do not run post_deploy if deploy failed
2025-03-12 22:21:05 -04:00
mbecker20
c3c2f57db4 1.17.0-dev 2025-03-12 22:21:05 -04:00
unsync
30043e32a4 feature: use the repo path instead of name in GetLatestCommit (#282)
* Update repo path handling in commit fetching

- Changed `name` to `path` for repository identification.
- Updated cache update function to use the new path field.
- Improved error message for non-directory repo paths.

* feat: use optional name and path in GetLatestCommit

* review: don't use optional for name

* review: use helper

* review: remove redundant to_string()
2025-03-12 22:21:05 -04:00
mbecker20
33a2897c2c update available deployment table 2025-03-12 22:21:05 -04:00
mbecker20
2ae1313170 show update available stack table 2025-03-12 22:21:05 -04:00
mbecker20
b1e38714cf finish oidc comment 2025-03-12 22:21:05 -04:00
mbecker20
43fe613f85 clean up rust client websocket subscription 2025-03-12 22:21:05 -04:00
mbecker20
6a8f46ee7a escape incoming sync backslashes (BREAKING) 2025-03-12 22:21:05 -04:00
mbecker20
cbb323fce5 rename Test Alerter button 2025-03-12 22:21:05 -04:00
mbecker20
4d85c601d3 simplify network stats 2025-03-12 22:21:05 -04:00
mbecker20
b03a4db3ce komodo-logo 2025-03-12 22:21:05 -04:00
mbecker20
a19f2afc74 higher quality / colored icons 2025-03-12 22:21:05 -04:00
mbecker20
b55e5c584b Add test alerter button 2025-03-12 22:21:05 -04:00
mbecker20
4d1a975c79 fix last axum updates 2025-03-12 22:21:05 -04:00
mbecker20
f45fb6f635 axum update :param to {param} syntax 2025-03-12 22:21:05 -04:00
mbecker20
5c6e09a48c rust 1.84.0 2025-03-12 22:21:05 -04:00
mbecker20
6ffb421662 test alert implementation 2025-03-12 22:21:05 -04:00
mbecker20
85f1cc699c add entities / message for test alerter 2025-03-12 22:21:05 -04:00
mbecker20
3c4e0b23df the komodo env file should be highest priority over additional files 2025-03-12 22:21:05 -04:00
mbecker20
a524c3ac2d clean up cors 2025-03-12 22:21:05 -04:00
mbecker20
e89a5bface just make it 1.17.0 2025-03-12 22:21:05 -04:00
mbecker20
d67130f6ee bump aws deps 2025-03-12 22:21:05 -04:00
mbecker20
d76736b71e axum to 0.8 2025-03-12 22:21:05 -04:00
mbecker20
c3857e93b6 resource2 not really a benefit 2025-03-12 22:21:05 -04:00
mbecker20
c879c393be format 2025-03-12 22:21:05 -04:00
mbecker20
7bae90b661 fmt 2025-03-12 22:21:05 -04:00
mbecker20
bef9d9397c resolver v3
add new ec2 instance types

clean up testing config

document the libraries a bit

clean up main

update sysinfo and otel

update client resolver 3.0

resolver v3 prog

clean up gitignore

implement periphery resolver v3

clean up

core read api v3

more prog

execute api

missing apis

compiling

1.16.13

work on more granular traits

prog on crud
2025-03-12 22:21:05 -04:00
Maarten Kossen
9c841e5bdc Change amd64 to arm64 to prevent installing aarch64 binary on an x86_64 system. (#357) 2025-03-12 19:20:49 -07:00
259 changed files with 4526 additions and 3210 deletions

View File

@@ -23,7 +23,7 @@ services:
db:
extends:
file: ../test.compose.yaml
file: ../dev.compose.yaml
service: ferretdb
volumes:

869
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,8 +8,8 @@ members = [
]
[workspace.package]
version = "1.17.0-dev-3"
edition = "2021"
version = "1.17.0-dev-7"
edition = "2024"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/moghtech/komodo"
@@ -40,61 +40,62 @@ derive_variants = "1.0.0"
mongo_indexed = "2.0.1"
resolver_api = "3.0.0"
toml_pretty = "1.1.2"
mungos = "1.1.0"
mungos = "3.2.0"
svi = "1.0.1"
# ASYNC
reqwest = { version = "0.12.12", default-features = false, features = ["json", "rustls-tls"] }
tokio = { version = "1.43.0", features = ["full"] }
reqwest = { version = "0.12.12", default-features = false, features = ["json", "rustls-tls-native-roots"] }
tokio = { version = "1.44.0", features = ["full"] }
tokio-util = "0.7.13"
futures = "0.3.31"
futures-util = "0.3.31"
arc-swap = "1.7.1"
# SERVER
axum-extra = { version = "0.10.0", features = ["typed-header"] }
tower-http = { version = "0.6.2", features = ["fs", "cors"] }
axum-server = { version = "0.7.1", features = ["tls-rustls"] }
axum = { version = "0.8.1", features = ["ws", "json", "macros"] }
tokio-tungstenite = "0.26.1"
tokio-tungstenite = "0.26.2"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.217", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.135"
serde = { version = "1.0.219", features = ["derive"] }
strum = { version = "0.27.1", features = ["derive"] }
serde_json = "1.0.140"
serde_yaml = "0.9.34"
toml = "0.8.19"
toml = "0.8.20"
# ERROR
anyhow = "1.0.95"
thiserror = "2.0.11"
anyhow = "1.0.97"
thiserror = "2.0.12"
# LOGGING
opentelemetry-otlp = { version = "0.27.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.27.1", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.28.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.28.0", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.19", features = ["json"] }
opentelemetry-semantic-conventions = "0.27.0"
tracing-opentelemetry = "0.28.0"
opentelemetry = "0.27.1"
opentelemetry-semantic-conventions = "0.28.0"
tracing-opentelemetry = "0.29.0"
opentelemetry = "0.28.0"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.26", features = ["derive"] }
clap = { version = "4.5.32", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.12.0", features = ["v4", "fast-rng", "serde"] }
openidconnect = "3.5.0"
uuid = { version = "1.15.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "9.3.1", default-features = false }
openidconnect = "4.0.0"
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.16.0"
bcrypt = "0.17.0"
base64 = "0.22.1"
rustls = "0.23.21"
rustls = "0.23.23"
hmac = "0.12.1"
sha2 = "0.10.8"
rand = "0.8.5"
jwt = "0.16.0"
rand = "0.9.0"
hex = "0.4.3"
# SYSTEM
@@ -102,8 +103,9 @@ bollard = "0.18.1"
sysinfo = "0.33.1"
# CLOUD
aws-config = "1.5.13"
aws-sdk-ec2 = "1.101.0"
aws-config = "1.6.0"
aws-sdk-ec2 = "1.117.0"
aws-credential-types = "1.2.2"
# MISC
derive_builder = "0.20.2"

View File

@@ -1,7 +1,7 @@
## Builds the Komodo Core and Periphery binaries
## for a specific architecture.
FROM rust:1.84.1-bullseye AS builder
FROM rust:1.85.0-bullseye AS builder
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./

View File

@@ -37,9 +37,10 @@ mungos.workspace = true
slack.workspace = true
svi.workspace = true
# external
axum-server.workspace = true
aws-credential-types.workspace = true
ordered_hash_map.workspace = true
openidconnect.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-config.workspace = true
@@ -51,6 +52,7 @@ serde_yaml.workspace = true
typeshare.workspace = true
octorust.workspace = true
wildcard.workspace = true
arc-swap.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
@@ -71,5 +73,5 @@ envy.workspace = true
rand.workspace = true
hmac.workspace = true
sha2.workspace = true
jwt.workspace = true
jsonwebtoken.workspace = true
hex.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.84.1-bullseye AS core-builder
FROM rust:1.85.0-bullseye AS core-builder
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./

View File

@@ -94,7 +94,9 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to = fmt_docker_container_state(to);
format!("📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
format!(
"📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
)
}
AlertData::DeploymentImageUpdateAvailable {
id,
@@ -104,7 +106,9 @@ pub async fn send_alert(
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!("⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}")
format!(
"⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}"
)
}
AlertData::DeploymentAutoUpdated {
id,
@@ -114,7 +118,9 @@ pub async fn send_alert(
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!("⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}")
format!(
"⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}"
)
}
AlertData::StackStateChange {
id,
@@ -126,7 +132,9 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to = fmt_stack_state(to);
format!("🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
format!(
"🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
)
}
AlertData::StackImageUpdateAvailable {
id,
@@ -137,7 +145,9 @@ pub async fn send_alert(
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!("⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}")
format!(
"⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}"
)
}
AlertData::StackAutoUpdated {
id,
@@ -150,13 +160,17 @@ pub async fn send_alert(
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images = images.join(", ");
format!("⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}")
format!(
"⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}"
)
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!("{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}")
format!(
"{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}"
)
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
@@ -167,7 +181,9 @@ pub async fn send_alert(
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!("{level} | Build **{name}** failed\nversion: **v{version}**\n{link}")
format!(
"{level} | Build **{name}** failed\nversion: **v{version}**\n{link}"
)
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);

View File

@@ -1,21 +1,21 @@
use std::collections::HashSet;
use ::slack::types::Block;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use komodo_client::entities::{
ResourceTargetVariant,
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
alerter::*,
deployment::DeploymentState,
stack::StackState,
ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use std::collections::HashSet;
use tracing::Instrument;
use crate::{config::core_config, state::db_client};
use crate::helpers::interpolate::interpolate_variables_secrets_into_string;
use crate::helpers::query::get_variables_and_secrets;
use crate::{config::core_config, state::db_client};
mod discord;
mod slack;
@@ -136,7 +136,6 @@ async fn send_custom_alert(
url: &str,
alert: &Alert,
) -> anyhow::Result<()> {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
@@ -156,9 +155,14 @@ async fn send_custom_alert(
.send()
.await
.map_err(|e| {
let replacers = secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error = svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!("Error with request: {}", sanitized_error))
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request: {}",
sanitized_error
))
})
.context("failed at post request to alerter")?;
let status = res.status();

View File

@@ -73,7 +73,9 @@ pub async fn send_alert(
let region = fmt_region(region);
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
let text = format!(
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%*"
);
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -87,7 +89,9 @@ pub async fn send_alert(
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
let text = format!(
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈"
);
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -113,7 +117,9 @@ pub async fn send_alert(
let percentage = 100.0 * used_gb / total_gb;
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let text = format!(
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾"
);
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -130,7 +136,9 @@ pub async fn send_alert(
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let text = format!(
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾"
);
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -160,7 +168,9 @@ pub async fn send_alert(
let percentage = 100.0 * used_gb / total_gb;
match alert.level {
SeverityLevel::Ok => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let text = format!(
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿"
);
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -169,12 +179,17 @@ pub async fn send_alert(
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}
_ => {
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let text = format!(
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿"
);
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -183,7 +198,10 @@ pub async fn send_alert(
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
];
(text, blocks.into())
}

View File

@@ -1,6 +1,6 @@
use std::{sync::OnceLock, time::Instant};
use axum::{http::HeaderMap, routing::post, Router};
use axum::{Router, http::HeaderMap, routing::post};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::auth::*, entities::user::User};
use resolver_api::Resolve;
@@ -105,8 +105,7 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
&& !config.google_oauth.secret.is_empty(),
oidc: config.oidc_enabled
&& !config.oidc_provider.is_empty()
&& !config.oidc_client_id.is_empty()
&& !config.oidc_client_secret.is_empty(),
&& !config.oidc_client_id.is_empty(),
registration_disabled: config.disable_user_registration,
}
})

View File

@@ -67,7 +67,7 @@ impl Resolve<ExecuteArgs> for RunAction {
) -> serror::Result<Update> {
let mut action = resource::get_check_permissions::<Action>(
&self.action,
&user,
user,
PermissionLevel::Execute,
)
.await?;
@@ -111,19 +111,31 @@ impl Resolve<ExecuteArgs> for RunAction {
let path = core_config().action_directory.join(&file);
if let Some(parent) = path.parent() {
let _ = fs::create_dir_all(parent).await;
fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize Action file parent directory {parent:?}"))?;
}
fs::write(&path, contents).await.with_context(|| {
format!("Failed to write action file to {path:?}")
})?;
let CoreConfig { ssl_enabled, .. } = core_config();
let https_cert_flag = if *ssl_enabled {
" --unsafely-ignore-certificate-errors=localhost"
} else {
""
};
let mut res = run_komodo_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
format!("deno run --allow-all {}", path.display()),
false,
format!(
"deno run --allow-all{https_cert_flag} {}",
path.display()
),
)
.await;
@@ -305,8 +317,8 @@ fn delete_file(
if name == file {
if let Err(e) = fs::remove_file(entry.path()).await {
warn!(
"Failed to clean up generated file after action execution | {e:#}"
);
"Failed to clean up generated file after action execution | {e:#}"
);
};
return true;
}

View File

@@ -1,6 +1,6 @@
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
@@ -82,7 +82,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
) -> serror::Result<Update> {
let mut build = resource::get_check_permissions::<Build>(
&self.build,
&user,
user,
PermissionLevel::Execute,
)
.await?;
@@ -517,7 +517,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
) -> serror::Result<Update> {
let build = resource::get_check_permissions::<Build>(
&self.build,
&user,
user,
PermissionLevel::Execute,
)
.await?;
@@ -560,7 +560,9 @@ impl Resolve<ExecuteArgs> for CancelBuild {
)
.await
{
warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}")
warn!(
"failed to set CancelBuild Update status Complete after timeout | {e:#}"
)
}
});

View File

@@ -1,21 +1,21 @@
use std::{collections::HashSet, sync::OnceLock};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use cache::TimeoutCache;
use formatting::format_serror;
use komodo_client::{
api::execute::*,
entities::{
Version,
build::{Build, ImageRegistryConfig},
deployment::{
extract_registry_domain, Deployment, DeploymentImage,
Deployment, DeploymentImage, extract_registry_domain,
},
get_image_name, komodo_timestamp, optional_string,
permission::PermissionLevel,
server::Server,
update::{Log, Update},
user::User,
Version,
},
};
use periphery_client::api;
@@ -561,7 +561,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, &user).await?;
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -610,7 +610,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, &user).await?;
setup_deployment_execution(&self.deployment, user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()

View File

@@ -1,17 +1,17 @@
use std::{pin::Pin, time::Instant};
use anyhow::Context;
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use axum::{Extension, Router, middleware, routing::post};
use axum_extra::{TypedHeader, headers::ContentType};
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
Operation,
update::{Log, Update},
user::User,
Operation,
},
};
use mungos::by_id::find_one_by_id;
@@ -25,7 +25,7 @@ use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
resource::{list_full_for_user_using_pattern, KomodoResource},
resource::{KomodoResource, list_full_for_user_using_pattern},
state::db_client,
};

View File

@@ -1,6 +1,6 @@
use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use formatting::{Color, bold, colored, format_serror, muted};
use komodo_client::{
api::execute::{
BatchExecutionResponse, BatchRunProcedure, RunProcedure,

View File

@@ -1,6 +1,6 @@
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use komodo_client::{
api::{execute::*, write::RefreshRepoCache},
@@ -173,7 +173,7 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
ExecuteArgs { user, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullRepo>(&self.pattern, &user)
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
.await?,
)
}
@@ -187,7 +187,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
) -> serror::Result<Update> {
let mut repo = resource::get_check_permissions::<Repo>(
&self.repo,
&user,
user,
PermissionLevel::Execute,
)
.await?;
@@ -438,8 +438,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
return handle_builder_early_return(
update, repo.id, repo.name, false,
)
.await
.map_err(Into::into);
.await;
}
};
@@ -696,7 +695,9 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
)
.await
{
warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}")
warn!(
"failed to set CancelRepoBuild Update status Complete after timeout | {e:#}"
)
}
});

View File

@@ -1,4 +1,4 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use komodo_client::{
api::{execute::LaunchServer, write::CreateServer},

View File

@@ -41,7 +41,7 @@ impl super::BatchExecute for BatchDeployStack {
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DeployStack(DeployStack {
stack,
service: None,
services: Vec::new(),
stop_time: None,
})
}
@@ -87,10 +87,13 @@ impl Resolve<ExecuteArgs> for DeployStack {
update_update(update.clone()).await?;
if let Some(service) = &self.service {
if !self.services.is_empty() {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for Stack service {service}"),
"Service/s",
format!(
"Execution requested for Stack service/s {}",
self.services.join(", ")
),
))
}
@@ -183,7 +186,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
} = periphery_client(&server)?
.request(ComposeUp {
stack: stack.clone(),
service: self.service,
services: self.services,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
@@ -371,7 +374,7 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
DeployStack {
stack: stack.name,
service: None,
services: Vec::new(),
stop_time: self.stop_time,
}
.resolve(&ExecuteArgs {
@@ -384,15 +387,20 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
pub async fn pull_stack_inner(
mut stack: Stack,
service: Option<String>,
services: Vec<String>,
server: &Server,
mut update: Option<&mut Update>,
) -> anyhow::Result<ComposePullResponse> {
if let (Some(service), Some(update)) = (&service, update.as_mut()) {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for Stack service {service}"),
))
if let Some(update) = update.as_mut() {
if !services.is_empty() {
update.logs.push(Log::simple(
"Service/s",
format!(
"Execution requested for Stack service/s {}",
services.join(", ")
),
))
}
}
let git_token = crate::helpers::git_token(
@@ -443,7 +451,7 @@ pub async fn pull_stack_inner(
let res = periphery_client(server)?
.request(ComposePull {
stack,
service,
services,
git_token,
registry_token,
})
@@ -483,7 +491,7 @@ impl Resolve<ExecuteArgs> for PullStack {
let res = pull_stack_inner(
stack,
self.service,
self.services,
&server,
Some(&mut update),
)
@@ -505,7 +513,7 @@ impl Resolve<ExecuteArgs> for StartStack {
) -> serror::Result<Update> {
execute_compose::<StartStack>(
&self.stack,
self.service,
self.services,
user,
|state| state.starting = true,
update.clone(),
@@ -524,7 +532,7 @@ impl Resolve<ExecuteArgs> for RestartStack {
) -> serror::Result<Update> {
execute_compose::<RestartStack>(
&self.stack,
self.service,
self.services,
user,
|state| {
state.restarting = true;
@@ -545,7 +553,7 @@ impl Resolve<ExecuteArgs> for PauseStack {
) -> serror::Result<Update> {
execute_compose::<PauseStack>(
&self.stack,
self.service,
self.services,
user,
|state| state.pausing = true,
update.clone(),
@@ -564,7 +572,7 @@ impl Resolve<ExecuteArgs> for UnpauseStack {
) -> serror::Result<Update> {
execute_compose::<UnpauseStack>(
&self.stack,
self.service,
self.services,
user,
|state| state.unpausing = true,
update.clone(),
@@ -583,7 +591,7 @@ impl Resolve<ExecuteArgs> for StopStack {
) -> serror::Result<Update> {
execute_compose::<StopStack>(
&self.stack,
self.service,
self.services,
user,
|state| state.stopping = true,
update.clone(),
@@ -599,7 +607,7 @@ impl super::BatchExecute for BatchDestroyStack {
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DestroyStack(DestroyStack {
stack,
service: None,
services: Vec::new(),
remove_orphans: false,
stop_time: None,
})
@@ -626,7 +634,7 @@ impl Resolve<ExecuteArgs> for DestroyStack {
) -> serror::Result<Update> {
execute_compose::<DestroyStack>(
&self.stack,
self.service,
self.services,
user,
|state| state.destroying = true,
update.clone(),

View File

@@ -1,11 +1,11 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use anyhow::{Context, anyhow};
use formatting::{Color, colored, format_serror};
use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self,
self, ResourceTargetVariant,
action::Action,
alerter::Alerter,
build::Build,
@@ -21,28 +21,24 @@ use komodo_client::{
sync::ResourceSync,
update::{Log, Update},
user::sync_user,
ResourceTargetVariant,
},
};
use mongo_indexed::doc;
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{oid::ObjectId, to_document},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::oid::ObjectId};
use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
helpers::{query::get_id_to_tags, update::update_update},
resource::{self, refresh_resource_sync_state_cache},
resource,
state::{action_states, db_client},
sync::{
deploy::{
build_deploy_cache, deploy_from_cache, SyncDeployParams,
},
execute::{get_updates_for_execution, ExecuteResourceSync},
remote::RemoteResources,
AllResourcesById, ResourceSyncTrait,
deploy::{
SyncDeployParams, build_deploy_cache, deploy_from_cache,
},
execute::{ExecuteResourceSync, get_updates_for_execution},
remote::RemoteResources,
},
};
@@ -61,7 +57,7 @@ impl Resolve<ExecuteArgs> for RunSync {
} = self;
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&sync, &user, PermissionLevel::Execute)
>(&sync, user, PermissionLevel::Execute)
.await?;
// get the action state for the sync (or insert default).
@@ -210,7 +206,7 @@ impl Resolve<ExecuteArgs> for RunSync {
let delete = sync.config.managed || sync.config.delete;
let (servers_to_create, servers_to_update, servers_to_delete) =
let server_deltas = if sync.config.include_resources {
get_updates_for_execution::<Server>(
resources.servers,
delete,
@@ -220,22 +216,11 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
deployments_to_create,
deployments_to_update,
deployments_to_delete,
) = get_updates_for_execution::<Deployment>(
resources.deployments,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (stacks_to_create, stacks_to_update, stacks_to_delete) =
.await?
} else {
Default::default()
};
let stack_deltas = if sync.config.include_resources {
get_updates_for_execution::<Stack>(
resources.stacks,
delete,
@@ -245,8 +230,25 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (builds_to_create, builds_to_update, builds_to_delete) =
.await?
} else {
Default::default()
};
let deployment_deltas = if sync.config.include_resources {
get_updates_for_execution::<Deployment>(
resources.deployments,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let build_deltas = if sync.config.include_resources {
get_updates_for_execution::<Build>(
resources.builds,
delete,
@@ -256,8 +258,11 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (repos_to_create, repos_to_update, repos_to_delete) =
.await?
} else {
Default::default()
};
let repo_deltas = if sync.config.include_resources {
get_updates_for_execution::<Repo>(
resources.repos,
delete,
@@ -267,22 +272,25 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
procedures_to_create,
procedures_to_update,
procedures_to_delete,
) = get_updates_for_execution::<Procedure>(
resources.procedures,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (actions_to_create, actions_to_update, actions_to_delete) =
.await?
} else {
Default::default()
};
let procedure_deltas = if sync.config.include_resources {
get_updates_for_execution::<Procedure>(
resources.procedures,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let action_deltas = if sync.config.include_resources {
get_updates_for_execution::<Action>(
resources.actions,
delete,
@@ -292,8 +300,11 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (builders_to_create, builders_to_update, builders_to_delete) =
.await?
} else {
Default::default()
};
let builder_deltas = if sync.config.include_resources {
get_updates_for_execution::<Builder>(
resources.builders,
delete,
@@ -303,8 +314,11 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (alerters_to_create, alerters_to_update, alerters_to_delete) =
.await?
} else {
Default::default()
};
let alerter_deltas = if sync.config.include_resources {
get_updates_for_execution::<Alerter>(
resources.alerters,
delete,
@@ -314,35 +328,38 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
server_templates_to_create,
server_templates_to_update,
server_templates_to_delete,
) = get_updates_for_execution::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
resource_syncs_to_create,
resource_syncs_to_update,
resource_syncs_to_delete,
) = get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
.await?
} else {
Default::default()
};
let server_template_deltas = if sync.config.include_resources {
get_updates_for_execution::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let resource_sync_deltas = if sync.config.include_resources {
get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let (
variables_to_create,
@@ -350,12 +367,11 @@ impl Resolve<ExecuteArgs> for RunSync {
variables_to_delete,
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.match_tags.is_empty()
&& sync.config.include_variables
{
crate::sync::variables::get_updates_for_execution(
resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
delete,
)
.await?
} else {
@@ -367,12 +383,11 @@ impl Resolve<ExecuteArgs> for RunSync {
user_groups_to_delete,
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.match_tags.is_empty()
&& sync.config.include_user_groups
{
crate::sync::user_groups::get_updates_for_execution(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
delete,
&all_resources,
)
.await?
@@ -381,39 +396,17 @@ impl Resolve<ExecuteArgs> for RunSync {
};
if deploy_cache.is_empty()
&& resource_syncs_to_create.is_empty()
&& resource_syncs_to_update.is_empty()
&& resource_syncs_to_delete.is_empty()
&& server_templates_to_create.is_empty()
&& server_templates_to_update.is_empty()
&& server_templates_to_delete.is_empty()
&& servers_to_create.is_empty()
&& servers_to_update.is_empty()
&& servers_to_delete.is_empty()
&& deployments_to_create.is_empty()
&& deployments_to_update.is_empty()
&& deployments_to_delete.is_empty()
&& stacks_to_create.is_empty()
&& stacks_to_update.is_empty()
&& stacks_to_delete.is_empty()
&& builds_to_create.is_empty()
&& builds_to_update.is_empty()
&& builds_to_delete.is_empty()
&& builders_to_create.is_empty()
&& builders_to_update.is_empty()
&& builders_to_delete.is_empty()
&& alerters_to_create.is_empty()
&& alerters_to_update.is_empty()
&& alerters_to_delete.is_empty()
&& repos_to_create.is_empty()
&& repos_to_update.is_empty()
&& repos_to_delete.is_empty()
&& procedures_to_create.is_empty()
&& procedures_to_update.is_empty()
&& procedures_to_delete.is_empty()
&& actions_to_create.is_empty()
&& actions_to_update.is_empty()
&& actions_to_delete.is_empty()
&& resource_sync_deltas.no_changes()
&& server_template_deltas.no_changes()
&& server_deltas.no_changes()
&& deployment_deltas.no_changes()
&& stack_deltas.no_changes()
&& build_deltas.no_changes()
&& builder_deltas.no_changes()
&& alerter_deltas.no_changes()
&& repo_deltas.no_changes()
&& procedure_deltas.no_changes()
&& action_deltas.no_changes()
&& user_groups_to_create.is_empty()
&& user_groups_to_update.is_empty()
&& user_groups_to_delete.is_empty()
@@ -456,111 +449,57 @@ impl Resolve<ExecuteArgs> for RunSync {
);
maybe_extend(
&mut update.logs,
ResourceSync::execute_sync_updates(
resource_syncs_to_create,
resource_syncs_to_update,
resource_syncs_to_delete,
)
.await,
ResourceSync::execute_sync_updates(resource_sync_deltas).await,
);
maybe_extend(
&mut update.logs,
ServerTemplate::execute_sync_updates(
server_templates_to_create,
server_templates_to_update,
server_templates_to_delete,
)
.await,
ServerTemplate::execute_sync_updates(server_template_deltas)
.await,
);
maybe_extend(
&mut update.logs,
Server::execute_sync_updates(
servers_to_create,
servers_to_update,
servers_to_delete,
)
.await,
Server::execute_sync_updates(server_deltas).await,
);
maybe_extend(
&mut update.logs,
Alerter::execute_sync_updates(
alerters_to_create,
alerters_to_update,
alerters_to_delete,
)
.await,
Alerter::execute_sync_updates(alerter_deltas).await,
);
maybe_extend(
&mut update.logs,
Action::execute_sync_updates(
actions_to_create,
actions_to_update,
actions_to_delete,
)
.await,
Action::execute_sync_updates(action_deltas).await,
);
// Dependent on server
maybe_extend(
&mut update.logs,
Builder::execute_sync_updates(
builders_to_create,
builders_to_update,
builders_to_delete,
)
.await,
Builder::execute_sync_updates(builder_deltas).await,
);
maybe_extend(
&mut update.logs,
Repo::execute_sync_updates(
repos_to_create,
repos_to_update,
repos_to_delete,
)
.await,
Repo::execute_sync_updates(repo_deltas).await,
);
// Dependant on builder
maybe_extend(
&mut update.logs,
Build::execute_sync_updates(
builds_to_create,
builds_to_update,
builds_to_delete,
)
.await,
Build::execute_sync_updates(build_deltas).await,
);
// Dependant on server / build
maybe_extend(
&mut update.logs,
Deployment::execute_sync_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await,
Deployment::execute_sync_updates(deployment_deltas).await,
);
// stack only depends on server, but maybe will depend on build later.
maybe_extend(
&mut update.logs,
Stack::execute_sync_updates(
stacks_to_create,
stacks_to_update,
stacks_to_delete,
)
.await,
Stack::execute_sync_updates(stack_deltas).await,
);
// Dependant on everything
maybe_extend(
&mut update.logs,
Procedure::execute_sync_updates(
procedures_to_create,
procedures_to_update,
procedures_to_delete,
)
.await,
Procedure::execute_sync_updates(procedure_deltas).await,
);
// Execute the deploy cache
@@ -609,21 +548,6 @@ impl Resolve<ExecuteArgs> for RunSync {
}
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_resource_sync_state_cache().await;
}
update_update(update.clone()).await?;
Ok(update)

View File

@@ -45,7 +45,7 @@ impl Resolve<ReadArgs> for ListActions {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Action>(self.query, &user, &all_tags)
resource::list_for_user::<Action>(self.query, user, &all_tags)
.await?,
)
}
@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListFullActions {
};
Ok(
resource::list_full_for_user::<Action>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -77,7 +77,7 @@ impl Resolve<ReadArgs> for GetActionActionState {
) -> serror::Result<ActionActionState> {
let action = resource::get_check_permissions::<Action>(
&self.action,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -98,7 +98,7 @@ impl Resolve<ReadArgs> for GetActionsSummary {
) -> serror::Result<GetActionsSummaryResponse> {
let actions = resource::list_full_for_user::<Action>(
Default::default(),
&user,
user,
&[],
)
.await

View File

@@ -61,7 +61,7 @@ impl Resolve<ReadArgs> for ListFullAlerters {
};
Ok(
resource::list_full_for_user::<Alerter>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -75,7 +75,7 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
) -> serror::Result<GetAlertersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Alerter,
>(&user)
>(user)
.await?
{
Some(ids) => doc! {

View File

@@ -6,11 +6,11 @@ use futures::TryStreamExt;
use komodo_client::{
api::read::*,
entities::{
Operation,
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::UpdateStatus,
Operation,
},
};
use mungos::{

View File

@@ -75,7 +75,7 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
) -> serror::Result<GetBuildersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Builder,
>(&user)
>(user)
.await?
{
Some(ids) => doc! {

View File

@@ -1,6 +1,6 @@
use std::{cmp, collections::HashSet};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::read::*,
entities::{
@@ -51,12 +51,20 @@ impl Resolve<ReadArgs> for ListDeployments {
} else {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Deployment>(
self.query, user, &all_tags,
)
.await?,
let only_update_available = self.query.specific.update_available;
let deployments = resource::list_for_user::<Deployment>(
self.query, user, &all_tags,
)
.await?;
let deployments = if only_update_available {
deployments
.into_iter()
.filter(|deployment| deployment.info.update_available)
.collect()
} else {
deployments
};
Ok(deployments)
}
}
@@ -281,7 +289,7 @@ impl Resolve<ReadArgs> for ListCommonDeploymentExtraArgs {
get_all_tags(None).await?
};
let deployments = resource::list_full_for_user::<Deployment>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;

View File

@@ -1,10 +1,11 @@
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use anyhow::{Context, anyhow};
use axum::{Extension, Router, middleware, routing::post};
use komodo_client::{
api::read::*,
entities::{
ResourceTarget,
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
@@ -12,7 +13,6 @@ use komodo_client::{
server::Server,
sync::ResourceSync,
user::User,
ResourceTarget,
},
};
use resolver_api::Resolve;
@@ -315,7 +315,7 @@ impl Resolve<ReadArgs> for ListSecrets {
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`").into(),
)
);
}
};
if let Some(id) = server_id {
@@ -373,7 +373,7 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`").into(),
)
);
}
}
}
@@ -381,17 +381,17 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
&user,
user,
&[]
),
resource::list_full_for_user::<Repo>(
Default::default(),
&user,
user,
&[]
),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user,
user,
&[]
),
)?;
@@ -473,7 +473,7 @@ impl Resolve<ReadArgs> for ListDockerRegistriesFromConfig {
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`").into(),
)
);
}
}
}

View File

@@ -1,4 +1,4 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::read::{
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,

View File

@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListFullProcedures {
};
Ok(
resource::list_full_for_user::<Procedure>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::api::read::*;
use mongo_indexed::{doc, Document};
use mongo_indexed::{Document, doc};
use mungos::{
by_id::find_one_by_id, find::find_collect,
mongodb::options::FindOptions,

View File

@@ -45,7 +45,7 @@ impl Resolve<ReadArgs> for ListRepos {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Repo>(self.query, &user, &all_tags)
resource::list_for_user::<Repo>(self.query, user, &all_tags)
.await?,
)
}
@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListFullRepos {
};
Ok(
resource::list_full_for_user::<Repo>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)

View File

@@ -4,13 +4,14 @@ use std::{
sync::{Arc, OnceLock},
};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use async_timing_util::{
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
FIFTEEN_SECONDS_MS, get_timelength_in_ms, unix_timestamp_ms,
};
use komodo_client::{
api::read::*,
entities::{
ResourceTarget,
deployment::Deployment,
docker::{
container::{Container, ContainerListItem},
@@ -25,7 +26,6 @@ use komodo_client::{
stack::{Stack, StackServiceNames},
stats::{SystemInformation, SystemProcess},
update::Log,
ResourceTarget,
},
};
use mungos::{
@@ -128,7 +128,7 @@ impl Resolve<ReadArgs> for ListServers {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Server>(self.query, &user, &all_tags)
resource::list_for_user::<Server>(self.query, user, &all_tags)
.await?,
)
}
@@ -146,7 +146,7 @@ impl Resolve<ReadArgs> for ListFullServers {
};
Ok(
resource::list_full_for_user::<Server>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -387,7 +387,7 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
) -> serror::Result<ListAllDockerContainersResponse> {
let servers = resource::list_for_user::<Server>(
Default::default(),
&user,
user,
&[],
)
.await?
@@ -533,7 +533,7 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
&user,
user,
)
.await?;

View File

@@ -76,7 +76,7 @@ impl Resolve<ReadArgs> for GetServerTemplatesSummary {
) -> serror::Result<GetServerTemplatesSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
ServerTemplate,
>(&user)
>(user)
.await?
{
Some(ids) => doc! {

View File

@@ -133,7 +133,7 @@ impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -164,7 +164,7 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -194,10 +194,25 @@ impl Resolve<ReadArgs> for ListStacks {
} else {
get_all_tags(None).await?
};
Ok(
let only_update_available = self.query.specific.update_available;
let stacks =
resource::list_for_user::<Stack>(self.query, user, &all_tags)
.await?,
)
.await?;
let stacks = if only_update_available {
stacks
.into_iter()
.filter(|stack| {
stack
.info
.services
.iter()
.any(|service| service.update_available)
})
.collect()
} else {
stacks
};
Ok(stacks)
}
}

View File

@@ -6,7 +6,6 @@ use komodo_client::{
permission::PermissionLevel,
sync::{
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
ResourceSyncState,
},
},
};
@@ -16,7 +15,7 @@ use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{action_states, github_client, resource_sync_state_cache},
state::{action_states, github_client},
};
use super::ReadArgs;
@@ -29,7 +28,7 @@ impl Resolve<ReadArgs> for GetResourceSync {
Ok(
resource::get_check_permissions::<ResourceSync>(
&self.sync,
&user,
user,
PermissionLevel::Read,
)
.await?,
@@ -49,7 +48,7 @@ impl Resolve<ReadArgs> for ListResourceSyncs {
};
Ok(
resource::list_for_user::<ResourceSync>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -68,7 +67,7 @@ impl Resolve<ReadArgs> for ListFullResourceSyncs {
};
Ok(
resource::list_full_for_user::<ResourceSync>(
self.query, &user, &all_tags,
self.query, user, &all_tags,
)
.await?,
)
@@ -112,7 +111,6 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
let mut res = GetResourceSyncsSummaryResponse::default();
let cache = resource_sync_state_cache();
let action_states = action_states();
for resource_sync in resource_syncs {
@@ -131,30 +129,18 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
res.failed += 1;
continue;
}
match (
cache.get(&resource_sync.id).await.unwrap_or_default(),
action_states
.resource_sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.syncing => {
res.syncing += 1;
}
(ResourceSyncState::Ok, _) => res.ok += 1,
(ResourceSyncState::Failed, _) => res.failed += 1,
(ResourceSyncState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
(ResourceSyncState::Syncing, _) => {
unreachable!()
}
(ResourceSyncState::Pending, _) => {
unreachable!()
}
if action_states
.resource_sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
.get()?
.syncing
{
res.syncing += 1;
continue;
}
res.ok += 1;
}
Ok(res)

View File

@@ -6,11 +6,12 @@ use komodo_client::{
ListUserGroups,
},
entities::{
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, resource::ResourceQuery,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, toml::ResourcesToml, ResourceTarget,
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment,
permission::PermissionLevel, procedure::Procedure, repo::Repo,
resource::ResourceQuery, server::Server,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, toml::ResourcesToml, user::User,
},
};
use mungos::find::find_collect;
@@ -23,156 +24,168 @@ use crate::{
resource,
state::db_client,
sync::{
toml::{convert_resource, ToToml, TOML_PRETTY_OPTIONS},
user_groups::convert_user_groups,
AllResourcesById,
toml::{TOML_PRETTY_OPTIONS, ToToml, convert_resource},
user_groups::convert_user_groups,
},
};
use super::ReadArgs;
async fn get_all_targets(
tags: &[String],
user: &User,
) -> anyhow::Result<Vec<ResourceTarget>> {
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Stack(resource.id)),
);
targets.extend(
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<Action>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
// These will already be filtered by [ExportResourcesToToml]
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
Ok(targets)
}
impl Resolve<ReadArgs> for ExportAllResourcesToToml {
async fn resolve(
self,
args: &ReadArgs,
) -> serror::Result<ExportAllResourcesToTomlResponse> {
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if self.tags.is_empty() {
vec![]
let targets = if self.include_resources {
get_all_targets(&self.tags, &args.user).await?
} else {
get_all_tags(None).await?
Vec::new()
};
let ReadArgs { user } = args;
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Stack(resource.id)),
);
targets.extend(
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<Action>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
// These will already be filtered by [ExportResourcesToToml]
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
let user_groups = if user.admin && self.tags.is_empty() {
find_collect(&db_client().user_groups, None, None)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|user_group| user_group.id)
.collect()
let user_groups = if self.include_user_groups {
if args.user.admin {
find_collect(&db_client().user_groups, None, None)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|user_group| user_group.id)
.collect()
} else {
get_user_user_group_ids(&args.user.id).await?
}
} else {
get_user_user_group_ids(&user.id).await?
Vec::new()
};
ExportResourcesToToml {
targets,
user_groups,
include_variables: self.tags.is_empty(),
include_variables: self.include_variables,
}
.resolve(args)
.await
@@ -198,7 +211,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Alerter(id) => {
let alerter = resource::get_check_permissions::<Alerter>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -212,7 +225,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::ResourceSync(id) => {
let sync = resource::get_check_permissions::<ResourceSync>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -231,9 +244,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::ServerTemplate(id) => {
let template = resource::get_check_permissions::<
ServerTemplate,
>(
&id, &user, PermissionLevel::Read
)
>(&id, user, PermissionLevel::Read)
.await?;
res.server_templates.push(
convert_resource::<ServerTemplate>(
@@ -247,7 +258,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Server(id) => {
let server = resource::get_check_permissions::<Server>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -262,7 +273,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
let mut builder =
resource::get_check_permissions::<Builder>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -277,7 +288,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Build(id) => {
let mut build = resource::get_check_permissions::<Build>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -293,7 +304,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
let mut deployment = resource::get_check_permissions::<
Deployment,
>(
&id, &user, PermissionLevel::Read
&id, user, PermissionLevel::Read
)
.await?;
Deployment::replace_ids(&mut deployment, &all);
@@ -307,7 +318,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Repo(id) => {
let mut repo = resource::get_check_permissions::<Repo>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -322,7 +333,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Stack(id) => {
let mut stack = resource::get_check_permissions::<Stack>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -338,7 +349,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
let mut procedure = resource::get_check_permissions::<
Procedure,
>(
&id, &user, PermissionLevel::Read
&id, user, PermissionLevel::Read
)
.await?;
Procedure::replace_ids(&mut procedure, &all);
@@ -352,7 +363,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Action(id) => {
let mut action = resource::get_check_permissions::<Action>(
&id,
&user,
user,
PermissionLevel::Read,
)
.await?;

View File

@@ -1,9 +1,10 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
@@ -18,7 +19,6 @@ use komodo_client::{
sync::ResourceSync,
update::{Update, UpdateListItem},
user::User,
ResourceTarget,
},
};
use mungos::{
@@ -43,7 +43,7 @@ impl Resolve<ReadArgs> for ListUpdates {
self.query
} else {
let server_query =
resource::get_resource_ids_for_user::<Server>(&user)
resource::get_resource_ids_for_user::<Server>(user)
.await?
.map(|ids| {
doc! {
@@ -53,7 +53,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
resource::get_resource_ids_for_user::<Deployment>(&user)
resource::get_resource_ids_for_user::<Deployment>(user)
.await?
.map(|ids| {
doc! {
@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query =
resource::get_resource_ids_for_user::<Stack>(&user)
resource::get_resource_ids_for_user::<Stack>(user)
.await?
.map(|ids| {
doc! {
@@ -73,7 +73,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query =
resource::get_resource_ids_for_user::<Build>(&user)
resource::get_resource_ids_for_user::<Build>(user)
.await?
.map(|ids| {
doc! {
@@ -83,7 +83,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query =
resource::get_resource_ids_for_user::<Repo>(&user)
resource::get_resource_ids_for_user::<Repo>(user)
.await?
.map(|ids| {
doc! {
@@ -93,7 +93,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
resource::get_resource_ids_for_user::<Procedure>(&user)
resource::get_resource_ids_for_user::<Procedure>(user)
.await?
.map(|ids| {
doc! {
@@ -103,7 +103,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query =
resource::get_resource_ids_for_user::<Action>(&user)
resource::get_resource_ids_for_user::<Action>(user)
.await?
.map(|ids| {
doc! {
@@ -113,7 +113,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query =
resource::get_resource_ids_for_user::<Builder>(&user)
resource::get_resource_ids_for_user::<Builder>(user)
.await?
.map(|ids| {
doc! {
@@ -123,7 +123,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query =
resource::get_resource_ids_for_user::<Alerter>(&user)
resource::get_resource_ids_for_user::<Alerter>(user)
.await?
.map(|ids| {
doc! {
@@ -133,7 +133,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query =
resource::get_resource_ids_for_user::<ServerTemplate>(&user)
resource::get_resource_ids_for_user::<ServerTemplate>(user)
.await?
.map(|ids| {
doc! {
@@ -144,7 +144,7 @@ impl Resolve<ReadArgs> for ListUpdates {
let resource_sync_query =
resource::get_resource_ids_for_user::<ResourceSync>(
&user,
user,
)
.await?
.map(|ids| {
@@ -242,12 +242,12 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::System(_) => {
return Err(
anyhow!("user must be admin to view system updates").into(),
)
);
}
ResourceTarget::Server(id) => {
resource::get_check_permissions::<Server>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -255,7 +255,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Deployment(id) => {
resource::get_check_permissions::<Deployment>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -263,7 +263,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Build(id) => {
resource::get_check_permissions::<Build>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -271,7 +271,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Repo(id) => {
resource::get_check_permissions::<Repo>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -279,7 +279,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Builder(id) => {
resource::get_check_permissions::<Builder>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -287,7 +287,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Alerter(id) => {
resource::get_check_permissions::<Alerter>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -295,7 +295,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Procedure(id) => {
resource::get_check_permissions::<Procedure>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -303,7 +303,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Action(id) => {
resource::get_check_permissions::<Action>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -311,7 +311,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::ServerTemplate(id) => {
resource::get_check_permissions::<ServerTemplate>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -319,7 +319,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;
@@ -327,7 +327,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Stack(id) => {
resource::get_check_permissions::<Stack>(
id,
&user,
user,
PermissionLevel::Read,
)
.await?;

View File

@@ -1,4 +1,4 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::read::{
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
@@ -6,7 +6,7 @@ use komodo_client::{
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{admin_service_user, UserConfig},
entities::user::{UserConfig, admin_service_user},
};
use mungos::{
by_id::find_one_by_id,

View File

@@ -5,7 +5,7 @@ use komodo_client::api::read::*;
use mungos::{
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId, Document},
bson::{Document, doc, oid::ObjectId},
options::FindOptions,
},
};

View File

@@ -1,7 +1,7 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Json, Router};
use anyhow::{Context, anyhow};
use axum::{Extension, Json, Router, middleware, routing::post};
use derive_variants::EnumVariants;
use komodo_client::{
api::user::*,

View File

@@ -32,12 +32,12 @@ impl Resolve<WriteArgs> for CopyAction {
let Action { config, .. } =
resource::get_check_permissions::<Action>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Action>(&self.name, config.into(), &user)
resource::create::<Action>(&self.name, config.into(), user)
.await?,
)
}

View File

@@ -32,7 +32,7 @@ impl Resolve<WriteArgs> for CopyAlerter {
let Alerter { config, .. } =
resource::get_check_permissions::<Alerter>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;

View File

@@ -1,13 +1,13 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, NoData,
build::{Build, BuildInfo, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
update::Update,
CloneArgs, NoData,
},
};
use mongo_indexed::doc;
@@ -308,7 +308,7 @@ impl Resolve<WriteArgs> for DeleteBuildWebhook {
let build = resource::get_check_permissions::<Build>(
&self.build,
&user,
user,
PermissionLevel::Write,
)
.await?;

View File

@@ -37,7 +37,7 @@ impl Resolve<WriteArgs> for CopyBuilder {
)
.await?;
Ok(
resource::create::<Builder>(&self.name, config.into(), &user)
resource::create::<Builder>(&self.name, config.into(), user)
.await?,
)
}
@@ -72,9 +72,6 @@ impl Resolve<WriteArgs> for RenameBuilder {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Update> {
Ok(
resource::rename::<Builder>(&self.id, &self.name, &user)
.await?,
)
Ok(resource::rename::<Builder>(&self.id, &self.name, user).await?)
}
}

View File

@@ -1,7 +1,8 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::*,
entities::{
Operation,
deployment::{
Deployment, DeploymentImage, DeploymentState,
PartialDeploymentConfig, RestartMode,
@@ -12,7 +13,6 @@ use komodo_client::{
server::{Server, ServerState},
to_komodo_name,
update::Update,
Operation,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
@@ -58,12 +58,8 @@ impl Resolve<WriteArgs> for CopyDeployment {
)
.await?;
Ok(
resource::create::<Deployment>(
&self.name,
config.into(),
&user,
)
.await?,
resource::create::<Deployment>(&self.name, config.into(), user)
.await?,
)
}
}
@@ -157,7 +153,7 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
}
Ok(
resource::create::<Deployment>(&self.name, config, &user)
resource::create::<Deployment>(&self.name, config, user)
.await?,
)
}
@@ -180,7 +176,7 @@ impl Resolve<WriteArgs> for UpdateDeployment {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
Ok(
resource::update::<Deployment>(&self.id, self.config, &user)
resource::update::<Deployment>(&self.id, self.config, user)
.await?,
)
}
@@ -224,7 +220,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
}
let mut update =
make_update(&deployment, Operation::RenameDeployment, &user);
make_update(&deployment, Operation::RenameDeployment, user);
update_one_by_id(
&db_client().deployments,

View File

@@ -2,10 +2,10 @@ use anyhow::anyhow;
use komodo_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, ResourceTarget,
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, server_template::ServerTemplate,
stack::Stack, sync::ResourceSync,
},
};
use resolver_api::Resolve;
@@ -27,13 +27,13 @@ impl Resolve<WriteArgs> for UpdateDescription {
"cannot update description of System resource target"
)
.into(),
)
);
}
ResourceTarget::Server(id) => {
resource::update_description::<Server>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -41,7 +41,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Deployment>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -49,7 +49,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Build>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -57,7 +57,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Repo>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -65,7 +65,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Builder>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -73,7 +73,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Alerter>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -81,7 +81,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Procedure>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -89,7 +89,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Action>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -97,7 +97,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<ServerTemplate>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -105,7 +105,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<ResourceSync>(
&id,
&self.description,
&user,
user,
)
.await?;
}
@@ -113,7 +113,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Stack>(
&id,
&self.description,
&user,
user,
)
.await?;
}

View File

@@ -1,7 +1,7 @@
use std::time::Instant;
use anyhow::Context;
use axum::{middleware, routing::post, Extension, Router};
use axum::{Extension, Router, middleware, routing::post};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::write::*, entities::user::User};
use resolver_api::Resolve;
@@ -172,6 +172,7 @@ pub enum WriteRequest {
CreateTag(CreateTag),
DeleteTag(DeleteTag),
RenameTag(RenameTag),
UpdateTagColor(UpdateTagColor),
UpdateTagsOnResource(UpdateTagsOnResource),
// ==== VARIABLE ====

View File

@@ -1,17 +1,17 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::*,
entities::{
permission::{UserTarget, UserTargetVariant},
ResourceTarget, ResourceTargetVariant,
permission::{UserTarget, UserTargetVariant},
},
};
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
mongodb::{
bson::{doc, oid::ObjectId, Document},
bson::{Document, doc, oid::ObjectId},
options::UpdateOptions,
},
};

View File

@@ -32,7 +32,7 @@ impl Resolve<WriteArgs> for CopyProcedure {
let Procedure { config, .. } =
resource::get_check_permissions::<Procedure>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;

View File

@@ -1,9 +1,9 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::*,
entities::{
provider::{DockerRegistryAccount, GitProviderAccount},
Operation, ResourceTarget,
provider::{DockerRegistryAccount, GitProviderAccount},
},
};
use mungos::{
@@ -44,7 +44,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateGitProviderAccount,
&user,
user,
);
account.id = db_client()
@@ -114,7 +114,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
&user,
user,
);
let account = to_document(&self.account).context(
@@ -173,7 +173,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
&user,
user,
);
let db = db_client();
@@ -235,7 +235,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateDockerRegistryAccount,
&user,
user,
);
account.id = db_client()
@@ -298,8 +298,8 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
if username.is_empty() {
return Err(
anyhow!(
"cannot update docker registry account with empty username"
)
"cannot update docker registry account with empty username"
)
.into(),
);
}
@@ -310,7 +310,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
&user,
user,
);
let account = to_document(&self.account).context(
@@ -373,7 +373,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
&user,
user,
);
let db = db_client();

View File

@@ -1,9 +1,10 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, NoData, Operation,
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
@@ -11,7 +12,6 @@ use komodo_client::{
server::Server,
to_komodo_name,
update::{Log, Update},
CloneArgs, NoData, Operation,
},
};
use mongo_indexed::doc;
@@ -53,12 +53,12 @@ impl Resolve<WriteArgs> for CopyRepo {
let Repo { config, .. } =
resource::get_check_permissions::<Repo>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Repo>(&self.name, config.into(), &user)
resource::create::<Repo>(&self.name, config.into(), user)
.await?,
)
}
@@ -89,7 +89,7 @@ impl Resolve<WriteArgs> for RenameRepo {
) -> serror::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;
@@ -98,7 +98,7 @@ impl Resolve<WriteArgs> for RenameRepo {
|| !repo.config.path.is_empty()
{
return Ok(
resource::rename::<Repo>(&repo.id, &self.name, &user).await?,
resource::rename::<Repo>(&repo.id, &self.name, user).await?,
);
}
@@ -113,7 +113,7 @@ impl Resolve<WriteArgs> for RenameRepo {
let name = to_komodo_name(&self.name);
let mut update = make_update(&repo, Operation::RenameRepo, &user);
let mut update = make_update(&repo, Operation::RenameRepo, user);
update_one_by_id(
&db_client().repos,
@@ -171,7 +171,7 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
// repo should be able to do this.
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
&user,
user,
PermissionLevel::Execute,
)
.await?;

View File

@@ -2,10 +2,10 @@ use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
Operation,
permission::PermissionLevel,
server::Server,
update::{Update, UpdateStatus},
Operation,
},
};
use periphery_client::api;
@@ -77,7 +77,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::CreateNetwork, &user);
make_update(&server, Operation::CreateNetwork, user);
update.status = UpdateStatus::InProgress;
update.id = add_update(update.clone()).await?;

View File

@@ -24,7 +24,7 @@ impl Resolve<WriteArgs> for CreateServerTemplate {
resource::create::<ServerTemplate>(
&self.name,
self.config,
&user,
user,
)
.await?,
)
@@ -40,7 +40,7 @@ impl Resolve<WriteArgs> for CopyServerTemplate {
let ServerTemplate { config, .. } =
resource::get_check_permissions::<ServerTemplate>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;
@@ -48,7 +48,7 @@ impl Resolve<WriteArgs> for CopyServerTemplate {
resource::create::<ServerTemplate>(
&self.name,
config.into(),
&user,
user,
)
.await?,
)

View File

@@ -1,6 +1,6 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::{user::CreateApiKey, write::*},
entities::{

View File

@@ -1,15 +1,15 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
FileContents, NoData, Operation,
config::core::CoreConfig,
permission::PermissionLevel,
server::ServerState,
stack::{PartialStackConfig, Stack, StackInfo},
update::Update,
user::stack_user,
FileContents, NoData, Operation,
},
};
use mungos::mongodb::bson::{doc, to_document};
@@ -33,7 +33,7 @@ use crate::{
resource,
stack::{
get_stack_and_server,
remote::{get_repo_compose_contents, RemoteComposeContents},
remote::{RemoteComposeContents, get_repo_compose_contents},
services::extract_services_into_res,
},
state::{db_client, github_client},
@@ -114,7 +114,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
} = self;
let (mut stack, server) = get_stack_and_server(
&stack,
&user,
user,
PermissionLevel::Write,
true,
)
@@ -127,7 +127,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
}
let mut update =
make_update(&stack, Operation::WriteStackContents, &user);
make_update(&stack, Operation::WriteStackContents, user);
update.push_simple_log("File contents to write", &contents);
@@ -402,7 +402,7 @@ impl Resolve<WriteArgs> for RefreshStackCache {
if state == ServerState::Ok {
let name = stack.name.clone();
if let Err(e) =
pull_stack_inner(stack, None, &server, None).await
pull_stack_inner(stack, Vec::new(), &server, None).await
{
warn!(
"Failed to pull latest images for Stack {name} | {e:#}",

View File

@@ -1,11 +1,11 @@
use std::{collections::HashMap, path::PathBuf};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::format_serror;
use komodo_client::{
api::{read::ExportAllResourcesToToml, write::*},
entities::{
self,
self, CloneArgs, NoData, Operation, ResourceTarget,
action::Action,
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
@@ -23,11 +23,11 @@ use komodo_client::{
stack::Stack,
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
SyncDeployUpdate,
},
to_komodo_name,
update::{Log, Update},
user::sync_user,
CloneArgs, NoData, Operation, ResourceTarget,
},
};
use mungos::{
@@ -49,11 +49,11 @@ use crate::{
query::get_id_to_tags,
update::{add_update, make_update, update_update},
},
resource::{self, refresh_resource_sync_state_cache},
resource,
state::{db_client, github_client},
sync::{
deploy::SyncDeployParams, remote::RemoteResources,
view::push_updates_for_view, AllResourcesById,
AllResourcesById, deploy::SyncDeployParams,
remote::RemoteResources, view::push_updates_for_view,
},
};
@@ -66,12 +66,8 @@ impl Resolve<WriteArgs> for CreateResourceSync {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
Ok(
resource::create::<ResourceSync>(
&self.name,
self.config,
&user,
)
.await?,
resource::create::<ResourceSync>(&self.name, self.config, user)
.await?,
)
}
}
@@ -85,7 +81,7 @@ impl Resolve<WriteArgs> for CopyResourceSync {
let ResourceSync { config, .. } =
resource::get_check_permissions::<ResourceSync>(
&self.id,
&user,
user,
PermissionLevel::Write,
)
.await?;
@@ -160,12 +156,18 @@ async fn write_sync_file_contents_on_host(
let full_path = root.join(&resource_path).join(&file_path);
if let Some(parent) = full_path.parent() {
let _ = fs::create_dir_all(parent).await;
fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize resource file parent directory {parent:?}"
)
})?;
}
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
format!("Failed to write file contents to {full_path:?}")
format!(
"Failed to write resource file contents to {full_path:?}"
)
})
{
update.push_error_log("Write File", format_serror(&e.into()));
@@ -223,7 +225,11 @@ async fn write_sync_file_contents_git(
let full_path = root.join(&resource_path).join(&file_path);
if let Some(parent) = full_path.parent() {
let _ = fs::create_dir_all(parent).await;
fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize resource file parent directory {parent:?}"
)
})?;
}
// Ensure the folder is initialized as git repo.
@@ -257,7 +263,9 @@ async fn write_sync_file_contents_git(
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
format!("Failed to write file contents to {full_path:?}")
format!(
"Failed to write resource file contents to {full_path:?}"
)
})
{
update.push_error_log("Write File", format_serror(&e.into()));
@@ -339,7 +347,7 @@ impl Resolve<WriteArgs> for CommitSync {
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&self.sync, &user, PermissionLevel::Write)
>(&self.sync, user, PermissionLevel::Write)
.await?;
let file_contents_empty = sync.config.file_contents_empty();
@@ -381,14 +389,17 @@ impl Resolve<WriteArgs> for CommitSync {
};
let res = ExportAllResourcesToToml {
include_resources: sync.config.include_resources,
tags: sync.config.match_tags.clone(),
include_variables: sync.config.include_variables,
include_user_groups: sync.config.include_user_groups,
}
.resolve(&ReadArgs {
user: sync_user().to_owned(),
})
.await?;
let mut update = make_update(&sync, Operation::CommitSync, &user);
let mut update = make_update(&sync, Operation::CommitSync, user);
update.id = add_update(update.clone()).await?;
update.logs.push(Log::simple("Resources", res.toml.clone()));
@@ -403,7 +414,9 @@ impl Resolve<WriteArgs> for CommitSync {
.join(to_komodo_name(&sync.name))
.join(&resource_path);
if let Some(parent) = file_path.parent() {
let _ = tokio::fs::create_dir_all(&parent).await;
fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize resource file parent directory {parent:?}"))?;
};
if let Err(e) = tokio::fs::write(&file_path, &res.toml)
.await
@@ -483,21 +496,6 @@ impl Resolve<WriteArgs> for CommitSync {
};
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_resource_sync_state_cache().await;
}
update_update(update.clone()).await?;
Ok(update)
@@ -548,171 +546,174 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
sync.info.pending_message = message;
if !sync.info.remote_errors.is_empty() {
return Err(
anyhow!(
"Remote resources have errors. Cannot compute diffs."
)
.into(),
);
return Err(anyhow!(
"Remote resources have errors. Cannot compute diffs."
));
}
let resources = resources?;
let id_to_tags = get_id_to_tags(None).await?;
let delete = sync.config.managed || sync.config.delete;
let all_resources = AllResourcesById::load().await?;
let deployments_by_name = all_resources
.deployments
.values()
.map(|deployment| {
(deployment.name.clone(), deployment.clone())
})
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
let (resource_updates, deploy_updates) =
if sync.config.include_resources {
let id_to_tags = get_id_to_tags(None).await?;
let deploy_updates =
crate::sync::deploy::get_updates_for_view(SyncDeployParams {
deployments: &resources.deployments,
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
})
.await;
let deployments_by_name = all_resources
.deployments
.values()
.map(|deployment| {
(deployment.name.clone(), deployment.clone())
})
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
let delete = sync.config.managed || sync.config.delete;
let deploy_updates =
crate::sync::deploy::get_updates_for_view(
SyncDeployParams {
deployments: &resources.deployments,
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
},
)
.await;
let mut diffs = Vec::new();
let mut diffs = Vec::new();
{
push_updates_for_view::<Server>(
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Stack>(
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Deployment>(
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Build>(
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Repo>(
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Procedure>(
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Action>(
resources.actions,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Builder>(
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Alerter>(
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
}
push_updates_for_view::<Server>(
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Stack>(
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Deployment>(
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Build>(
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Repo>(
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Procedure>(
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Action>(
resources.actions,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Builder>(
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Alerter>(
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
let variable_updates = if sync.config.match_tags.is_empty() {
(diffs, deploy_updates)
} else {
(Vec::new(), SyncDeployUpdate::default())
};
let variable_updates = if sync.config.include_variables {
crate::sync::variables::get_updates_for_view(
&resources.variables,
delete,
@@ -722,7 +723,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
Default::default()
};
let user_group_updates = if sync.config.match_tags.is_empty() {
let user_group_updates = if sync.config.include_user_groups {
crate::sync::user_groups::get_updates_for_view(
resources.user_groups,
delete,
@@ -734,7 +735,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
};
anyhow::Ok((
diffs,
resource_updates,
deploy_updates,
variable_updates,
user_group_updates,

View File

@@ -1,17 +1,26 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::{
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
UpdateTagsOnResourceResponse,
CreateTag, DeleteTag, RenameTag, UpdateTagColor,
UpdateTagsOnResource, UpdateTagsOnResourceResponse,
},
entities::{
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, ResourceTarget,
ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
tag::{Tag, TagColor},
},
};
use mungos::{
@@ -41,6 +50,7 @@ impl Resolve<WriteArgs> for CreateTag {
let mut tag = Tag {
id: Default::default(),
name: self.name,
color: TagColor::Slate,
owner: user.id.clone(),
};
@@ -68,7 +78,7 @@ impl Resolve<WriteArgs> for RenameTag {
return Err(anyhow!("tag name cannot be ObjectId").into());
}
get_tag_check_owner(&self.id, &user).await?;
get_tag_check_owner(&self.id, user).await?;
update_one_by_id(
&db_client().tags,
@@ -83,13 +93,34 @@ impl Resolve<WriteArgs> for RenameTag {
}
}
impl Resolve<WriteArgs> for UpdateTagColor {
#[instrument(name = "UpdateTagColor", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Tag> {
let tag = get_tag_check_owner(&self.tag, user).await?;
update_one_by_id(
&db_client().tags,
&tag.id,
doc! { "$set": { "color": self.color.as_ref() } },
None,
)
.await
.context("failed to rename tag on db")?;
Ok(get_tag(&self.tag).await?)
}
}
impl Resolve<WriteArgs> for DeleteTag {
#[instrument(name = "DeleteTag", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Tag> {
let tag = get_tag_check_owner(&self.id, &user).await?;
let tag = get_tag_check_owner(&self.id, user).await?;
tokio::try_join!(
resource::remove_tag_from_all::<Server>(&self.id),
@@ -118,7 +149,7 @@ impl Resolve<WriteArgs> for UpdateTagsOnResource {
let WriteArgs { user } = args;
match self.target {
ResourceTarget::System(_) => {
return Err(anyhow!("Invalid target type: System").into())
return Err(anyhow!("Invalid target type: System").into());
}
ResourceTarget::Build(id) => {
resource::get_check_permissions::<Build>(

View File

@@ -1,18 +1,20 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::{
DeleteUser, DeleteUserResponse, UpdateUserPassword,
UpdateUserPasswordResponse, UpdateUserUsername,
UpdateUserUsernameResponse,
},
entities::{user::UserConfig, NoData},
entities::{NoData, user::UserConfig},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{helpers::hash_password, state::db_client};
use crate::{
config::core_config, helpers::hash_password, state::db_client,
};
use super::WriteArgs;
@@ -23,6 +25,16 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateUserUsernameResponse> {
for locked_username in &core_config().lock_login_credentials_for {
if locked_username == "__ALL__"
|| *locked_username == user.username
{
return Err(
anyhow!("User not allowed to update their username.")
.into(),
);
}
}
if self.username.is_empty() {
return Err(anyhow!("Username cannot be empty.").into());
}
@@ -56,6 +68,16 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateUserPasswordResponse> {
for locked_username in &core_config().lock_login_credentials_for {
if locked_username == "__ALL__"
|| *locked_username == user.username
{
return Err(
anyhow!("User not allowed to update their password.")
.into(),
);
}
}
let UserConfig::Local { .. } = user.config else {
return Err(anyhow!("User is not local user").into());
};

View File

@@ -1,6 +1,6 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::{
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,

View File

@@ -1,7 +1,7 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::{
api::write::*,
entities::{variable::Variable, Operation, ResourceTarget},
entities::{Operation, ResourceTarget, variable::Variable},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
@@ -49,7 +49,7 @@ impl Resolve<WriteArgs> for CreateVariable {
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateVariable,
&user,
user,
);
update
@@ -92,7 +92,7 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateVariableValue,
&user,
user,
);
let log = if variable.is_secret {

View File

@@ -1,11 +1,11 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use tokio::sync::Mutex;
use crate::{
@@ -47,15 +47,21 @@ impl GithubOauthClient {
return None;
}
if host.is_empty() {
warn!("github oauth is enabled, but 'config.host' is not configured");
warn!(
"github oauth is enabled, but 'config.host' is not configured"
);
return None;
}
if id.is_empty() {
warn!("github oauth is enabled, but 'config.github_oauth.id' is not configured");
warn!(
"github oauth is enabled, but 'config.github_oauth.id' is not configured"
);
return None;
}
if secret.is_empty() {
warn!("github oauth is enabled, but 'config.github_oauth.secret' is not configured");
warn!(
"github oauth is enabled, but 'config.github_oauth.secret' is not configured"
);
return None;
}
GithubOauthClient {

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
Router, extract::Query, response::Redirect, routing::get,
};
use komodo_client::entities::{
komodo_timestamp,
@@ -72,7 +72,7 @@ async fn callback(
.context("failed at find user query from database")?;
let jwt = match user {
Some(user) => jwt_client()
.generate(user.id)
.encode(user.id)
.context("failed to generate jwt")?,
None => {
let ts = komodo_timestamp();
@@ -109,7 +109,7 @@ async fn callback(
.context("inserted_id is not ObjectId")?
.to_string();
jwt_client()
.generate(user_id)
.encode(user_id)
.context("failed to generate jwt")?
}
};

View File

@@ -1,13 +1,12 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use jwt::Token;
use anyhow::{Context, anyhow};
use jsonwebtoken::{DecodingKey, Validation, decode};
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{de::DeserializeOwned, Deserialize};
use serde_json::Value;
use serde::{Deserialize, de::DeserializeOwned};
use tokio::sync::Mutex;
use crate::{
@@ -49,15 +48,21 @@ impl GoogleOauthClient {
return None;
}
if host.is_empty() {
warn!("google oauth is enabled, but 'config.host' is not configured");
warn!(
"google oauth is enabled, but 'config.host' is not configured"
);
return None;
}
if id.is_empty() {
warn!("google oauth is enabled, but 'config.google_oauth.id' is not configured");
warn!(
"google oauth is enabled, but 'config.google_oauth.id' is not configured"
);
return None;
}
if secret.is_empty() {
warn!("google oauth is enabled, but 'config.google_oauth.secret' is not configured");
warn!(
"google oauth is enabled, but 'config.google_oauth.secret' is not configured"
);
return None;
}
let scopes = urlencoding::encode(
@@ -139,10 +144,16 @@ impl GoogleOauthClient {
&self,
id_token: &str,
) -> anyhow::Result<GoogleUser> {
let t: Token<Value, GoogleUser, jwt::Unverified> =
Token::parse_unverified(id_token)
.context("failed to parse id_token")?;
Ok(t.claims().to_owned())
let mut v = Validation::new(Default::default());
v.insecure_disable_signature_validation();
v.validate_aud = false;
let res = decode::<GoogleUser>(
id_token,
&DecodingKey::from_secret(b""),
&v,
)
.context("failed to decode google id token")?;
Ok(res.claims)
}
#[instrument(level = "debug", skip(self))]

View File

@@ -1,7 +1,7 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Query, response::Redirect, routing::get, Router,
Router, extract::Query, response::Redirect, routing::get,
};
use komodo_client::entities::user::{User, UserConfig};
use mongo_indexed::Document;
@@ -81,7 +81,7 @@ async fn callback(
.context("failed at find user query from mongo")?;
let jwt = match user {
Some(user) => jwt_client()
.generate(user.id)
.encode(user.id)
.context("failed to generate jwt")?,
None => {
let ts = unix_timestamp_ms() as i64;
@@ -124,7 +124,7 @@ async fn callback(
.context("inserted_id is not ObjectId")?
.to_string();
jwt_client()
.generate(user_id)
.encode(user_id)
.context("failed to generate jwt")?
}
};

View File

@@ -1,15 +1,15 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use async_timing_util::{
get_timelength_in_ms, unix_timestamp_ms, Timelength,
Timelength, get_timelength_in_ms, unix_timestamp_ms,
};
use jsonwebtoken::{
DecodingKey, EncodingKey, Header, Validation, decode, encode,
};
use hmac::{Hmac, Mac};
use jwt::SignWithKey;
use komodo_client::entities::config::core::CoreConfig;
use mungos::mongodb::bson::doc;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
use crate::helpers::random_string;
@@ -24,7 +24,10 @@ pub struct JwtClaims {
}
pub struct JwtClient {
pub key: Hmac<Sha256>,
header: Header,
validation: Validation,
encoding_key: EncodingKey,
decoding_key: DecodingKey,
ttl_ms: u128,
exchange_tokens: ExchangeTokenMap,
}
@@ -36,10 +39,11 @@ impl JwtClient {
} else {
config.jwt_secret.clone()
};
let key = Hmac::new_from_slice(secret.as_bytes())
.context("failed at taking HmacSha256 of jwt secret")?;
Ok(JwtClient {
key,
header: Header::default(),
validation: Validation::new(Default::default()),
encoding_key: EncodingKey::from_secret(secret.as_bytes()),
decoding_key: DecodingKey::from_secret(secret.as_bytes()),
ttl_ms: get_timelength_in_ms(
config.jwt_ttl.to_string().parse()?,
),
@@ -47,7 +51,7 @@ impl JwtClient {
})
}
pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
pub fn encode(&self, user_id: String) -> anyhow::Result<String> {
let iat = unix_timestamp_ms();
let exp = iat + self.ttl_ms;
let claims = JwtClaims {
@@ -55,10 +59,14 @@ impl JwtClient {
iat,
exp,
};
let jwt = claims
.sign_with_key(&self.key)
.context("failed at signing claim")?;
Ok(jwt)
encode(&self.header, &claims, &self.encoding_key)
.context("failed at signing claim")
}
pub fn decode(&self, jwt: &str) -> anyhow::Result<JwtClaims> {
decode::<JwtClaims>(jwt, &self.decoding_key, &self.validation)
.map(|res| res.claims)
.context("failed to decode token claims")
}
#[instrument(level = "debug", skip_all)]

View File

@@ -1,6 +1,6 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use async_timing_util::unix_timestamp_ms;
use komodo_client::{
api::auth::{
@@ -85,7 +85,7 @@ impl Resolve<AuthArgs> for CreateLocalUser {
.to_string();
let jwt = jwt_client()
.generate(user_id)
.encode(user_id)
.context("failed to generate jwt for user")?;
Ok(CreateLocalUserResponse { jwt })
@@ -131,7 +131,7 @@ impl Resolve<AuthArgs> for LoginLocalUser {
}
let jwt = jwt_client()
.generate(user.id)
.encode(user.id)
.context("failed at generating jwt for user")?;
Ok(LoginLocalUserResponse { jwt })

View File

@@ -1,5 +1,4 @@
use ::jwt::VerifyWithKey;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Request, http::HeaderMap, middleware::Next,
@@ -71,7 +70,9 @@ pub async fn get_user_id_from_headers(
}
_ => {
// AUTH FAIL
Err(anyhow!("must attach either AUTHORIZATION header with jwt OR pass X-API-KEY and X-API-SECRET"))
Err(anyhow!(
"must attach either AUTHORIZATION header with jwt OR pass X-API-KEY and X-API-SECRET"
))
}
}
}
@@ -93,9 +94,7 @@ pub async fn authenticate_check_enabled(
pub async fn auth_jwt_get_user_id(
jwt: &str,
) -> anyhow::Result<String> {
let claims: JwtClaims = jwt
.verify_with_key(&jwt_client().key)
.context("failed to verify claims")?;
let claims: JwtClaims = jwt_client().decode(jwt)?;
if claims.exp > unix_timestamp_ms() {
Ok(claims.id)
} else {

View File

@@ -1,67 +1,94 @@
use std::sync::OnceLock;
use std::{sync::OnceLock, time::Duration};
use anyhow::Context;
use arc_swap::ArcSwapOption;
use openidconnect::{
core::{CoreClient, CoreProviderMetadata},
reqwest::async_http_client,
ClientId, ClientSecret, IssuerUrl, RedirectUrl,
Client, ClientId, ClientSecret, EmptyAdditionalClaims,
EndpointMaybeSet, EndpointNotSet, EndpointSet, IssuerUrl,
RedirectUrl, StandardErrorResponse, core::*,
};
use crate::config::core_config;
static DEFAULT_OIDC_CLIENT: OnceLock<Option<CoreClient>> =
OnceLock::new();
type OidcClient = Client<
EmptyAdditionalClaims,
CoreAuthDisplay,
CoreGenderClaim,
CoreJweContentEncryptionAlgorithm,
CoreJsonWebKey,
CoreAuthPrompt,
StandardErrorResponse<CoreErrorResponseType>,
CoreTokenResponse,
CoreTokenIntrospectionResponse,
CoreRevocableToken,
CoreRevocationErrorResponse,
EndpointSet,
EndpointNotSet,
EndpointNotSet,
EndpointNotSet,
EndpointMaybeSet,
EndpointMaybeSet,
>;
pub fn default_oidc_client() -> Option<&'static CoreClient> {
DEFAULT_OIDC_CLIENT
.get()
.expect("OIDC client get before init")
.as_ref()
pub fn oidc_client() -> &'static ArcSwapOption<OidcClient> {
static OIDC_CLIENT: OnceLock<ArcSwapOption<OidcClient>> =
OnceLock::new();
OIDC_CLIENT.get_or_init(Default::default)
}
pub async fn init_default_oidc_client() {
/// The OIDC client must be reinitialized to
/// pick up the latest provider JWKs. This
/// function spawns a management thread to do this
/// on a loop.
pub async fn spawn_oidc_client_management() {
let config = core_config();
if !config.oidc_enabled
|| config.oidc_provider.is_empty()
|| config.oidc_client_id.is_empty()
|| config.oidc_client_secret.is_empty()
{
DEFAULT_OIDC_CLIENT
.set(None)
.expect("Default OIDC client initialized twice");
return;
}
async {
// Use OpenID Connect Discovery to fetch the provider metadata.
let provider_metadata = CoreProviderMetadata::discover_async(
IssuerUrl::new(config.oidc_provider.clone())?,
async_http_client,
)
reset_oidc_client()
.await
.context(
"Failed to get OIDC /.well-known/openid-configuration",
)?;
// Create an OpenID Connect client by specifying the client ID, client secret, authorization URL
// and token URL.
let client = CoreClient::from_provider_metadata(
provider_metadata,
ClientId::new(config.oidc_client_id.to_string()),
Some(ClientSecret::new(config.oidc_client_secret.to_string())),
)
// Set the URL the user will be redirected to after the authorization process.
.set_redirect_uri(RedirectUrl::new(format!(
"{}/auth/oidc/callback",
core_config().host
))?);
DEFAULT_OIDC_CLIENT
.set(Some(client))
.expect("Default OIDC client initialized twice");
anyhow::Ok(())
}
.await
.context("Failed to init default OIDC client")
.unwrap();
.context("Failed to initialize OIDC client.")
.unwrap();
tokio::spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(60)).await;
if let Err(e) = reset_oidc_client().await {
warn!("Failed to reinitialize OIDC client | {e:#}");
}
}
});
}
async fn reset_oidc_client() -> anyhow::Result<()> {
let config = core_config();
// Use OpenID Connect Discovery to fetch the provider metadata.
let provider_metadata = CoreProviderMetadata::discover_async(
IssuerUrl::new(config.oidc_provider.clone())?,
super::reqwest_client(),
)
.await
.context("Failed to get OIDC /.well-known/openid-configuration")?;
let client = CoreClient::from_provider_metadata(
provider_metadata,
ClientId::new(config.oidc_client_id.to_string()),
// The secret may be empty / ommitted if auth provider supports PKCE
if config.oidc_client_secret.is_empty() {
None
} else {
Some(ClientSecret::new(config.oidc_client_secret.to_string()))
},
)
// Set the URL the user will be redirected to after the authorization process.
.set_redirect_uri(RedirectUrl::new(format!(
"{}/auth/oidc/callback",
core_config().host
))?);
oidc_client().store(Some(client.into()));
Ok(())
}

View File

@@ -1,20 +1,20 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use axum::{
extract::Query, response::Redirect, routing::get, Router,
Router, extract::Query, response::Redirect, routing::get,
};
use client::default_oidc_client;
use client::oidc_client;
use dashmap::DashMap;
use komodo_client::entities::{
komodo_timestamp,
user::{User, UserConfig},
};
use mungos::mongodb::bson::{doc, Document};
use mungos::mongodb::bson::{Document, doc};
use openidconnect::{
core::CoreAuthenticationFlow, AccessTokenHash, AuthorizationCode,
CsrfToken, Nonce, OAuth2TokenResponse, PkceCodeChallenge,
PkceCodeVerifier, Scope, TokenResponse,
AccessTokenHash, AuthorizationCode, CsrfToken, Nonce,
OAuth2TokenResponse, PkceCodeChallenge, PkceCodeVerifier, Scope,
TokenResponse, core::CoreAuthenticationFlow,
};
use reqwest::StatusCode;
use serde::Deserialize;
@@ -29,16 +29,28 @@ use super::RedirectQuery;
pub mod client;
fn reqwest_client() -> &'static reqwest::Client {
static REQWEST: OnceLock<reqwest::Client> = OnceLock::new();
REQWEST.get_or_init(|| {
reqwest::Client::builder()
.redirect(reqwest::redirect::Policy::none())
.build()
.expect("Invalid OIDC reqwest client")
})
}
/// CSRF tokens can only be used once from the callback,
/// and must be used within this timeframe
const CSRF_VALID_FOR_MS: i64 = 120_000; // 2 minutes for user to log in.
type RedirectUrl = Option<String>;
type CsrfMap =
/// Maps the csrf secrets to other information added in the "login" method (before auth provider redirect).
/// This information is retrieved in the "callback" method (after auth provider redirect).
type VerifierMap =
DashMap<String, (PkceCodeVerifier, Nonce, RedirectUrl, i64)>;
fn csrf_verifier_tokens() -> &'static CsrfMap {
static CSRF: OnceLock<CsrfMap> = OnceLock::new();
CSRF.get_or_init(Default::default)
fn verifier_tokens() -> &'static VerifierMap {
static VERIFIERS: OnceLock<VerifierMap> = OnceLock::new();
VERIFIERS.get_or_init(Default::default)
}
pub fn router() -> Router {
@@ -61,10 +73,10 @@ pub fn router() -> Router {
async fn login(
Query(RedirectQuery { redirect }): Query<RedirectQuery>,
) -> anyhow::Result<Redirect> {
let client = oidc_client().load();
let client =
default_oidc_client().context("OIDC Client not configured")?;
client.as_ref().context("OIDC Client not configured")?;
// Generate a PKCE challenge.
let (pkce_challenge, pkce_verifier) =
PkceCodeChallenge::new_random_sha256();
@@ -75,13 +87,13 @@ async fn login(
CsrfToken::new_random,
Nonce::new_random,
)
.set_pkce_challenge(pkce_challenge)
.add_scope(Scope::new("openid".to_string()))
.add_scope(Scope::new("email".to_string()))
.set_pkce_challenge(pkce_challenge)
.url();
// Data inserted here will be matched on callback side for csrf protection.
csrf_verifier_tokens().insert(
verifier_tokens().insert(
csrf_token.secret().clone(),
(
pkce_verifier,
@@ -123,8 +135,9 @@ struct CallbackQuery {
async fn callback(
Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
let client = oidc_client().load();
let client =
default_oidc_client().context("OIDC Client not configured")?;
client.as_ref().context("OIDC Client not configured")?;
if let Some(e) = query.error {
return Err(anyhow!("Provider returned error: {e}"));
@@ -136,9 +149,9 @@ async fn callback(
);
let (_, (pkce_verifier, nonce, redirect, valid_until)) =
csrf_verifier_tokens()
verifier_tokens()
.remove(state.secret())
.context("CSRF Token invalid")?;
.context("CSRF token invalid")?;
if komodo_timestamp() > valid_until {
return Err(anyhow!(
@@ -148,9 +161,9 @@ async fn callback(
let token_response = client
.exchange_code(AuthorizationCode::new(code))
// Set the PKCE code verifier.
.context("Failed to get Oauth token at exchange code")?
.set_pkce_verifier(pkce_verifier)
.request_async(openidconnect::reqwest::async_http_client)
.request_async(reqwest_client())
.await
.context("Failed to get Oauth token")?;
@@ -173,7 +186,7 @@ async fn callback(
let claims = id_token
.claims(&verifier, &nonce)
.context("Failed to verify token claims")?;
.context("Failed to verify token claims. This issue may be temporary (60 seconds max).")?;
// Verify the access token hash to ensure that the access token hasn't been substituted for
// another user's.
@@ -181,7 +194,8 @@ async fn callback(
{
let actual_access_token_hash = AccessTokenHash::from_token(
token_response.access_token(),
&id_token.signing_alg()?,
id_token.signing_alg()?,
id_token.signing_key(&verifier)?,
)?;
if actual_access_token_hash != *expected_access_token_hash {
return Err(anyhow!("Invalid access token"));
@@ -202,7 +216,7 @@ async fn callback(
let jwt = match user {
Some(user) => jwt_client()
.generate(user.id)
.encode(user.id)
.context("failed to generate jwt")?,
None => {
let ts = komodo_timestamp();
@@ -258,7 +272,7 @@ async fn callback(
.context("inserted_id is not ObjectId")?
.to_string();
jwt_client()
.generate(user_id)
.encode(user_id)
.context("failed to generate jwt")?
}
};

View File

@@ -1,22 +1,22 @@
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ec2::{
Client,
types::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,
InstanceStateName, InstanceStatus, InstanceType, ResourceType,
Tag, TagSpecification, VolumeType,
},
Client,
};
use base64::Engine;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
komodo_timestamp,
server_template::aws::AwsServerTemplateConfig,
ResourceTarget,
};
use crate::{alert::send_alerts, config::core_config};
@@ -29,20 +29,40 @@ pub struct Ec2Instance {
pub ip: String,
}
/// Provides credentials in the core config file to the AWS client
#[derive(Debug)]
struct CredentialsFromConfig;
impl aws_credential_types::provider::ProvideCredentials
for CredentialsFromConfig
{
fn provide_credentials<'a>(
&'a self,
) -> aws_credential_types::provider::future::ProvideCredentials<'a>
where
Self: 'a,
{
aws_credential_types::provider::future::ProvideCredentials::new(
async {
let config = core_config();
Ok(aws_credential_types::Credentials::new(
&config.aws.access_key_id,
&config.aws.secret_access_key,
None,
None,
"komodo-config",
))
},
)
}
}
#[instrument]
async fn create_ec2_client(region: String) -> Client {
// There may be a better way to pass these keys to client
std::env::set_var(
"AWS_ACCESS_KEY_ID",
&core_config().aws.access_key_id,
);
std::env::set_var(
"AWS_SECRET_ACCESS_KEY",
&core_config().aws.secret_access_key,
);
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
let config = aws_config::defaults(BehaviorVersion::latest())
.region(region)
.credentials_provider(CredentialsFromConfig)
.load()
.await;
Client::new(&config)

View File

@@ -1,7 +1,7 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use axum::http::{HeaderName, HeaderValue};
use reqwest::{RequestBuilder, StatusCode};
use serde::{de::DeserializeOwned, Serialize};
use serde::{Serialize, de::DeserializeOwned};
use super::{
common::{

View File

@@ -3,7 +3,7 @@ use std::{
time::Duration,
};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use futures::future::join_all;
use komodo_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,

View File

@@ -182,6 +182,8 @@ pub fn core_config() -> &'static CoreConfig {
.unwrap_or(config.disable_user_registration),
disable_non_admin_create: env.komodo_disable_non_admin_create
.unwrap_or(config.disable_non_admin_create),
lock_login_credentials_for: env.komodo_lock_login_credentials_for
.unwrap_or(config.lock_login_credentials_for),
local_auth: env.komodo_local_auth
.unwrap_or(config.local_auth),
logging: LogConfig {

View File

@@ -89,7 +89,9 @@ impl DbClient {
client = client.address(address);
}
_ => {
error!("config.mongo not configured correctly. must pass either config.mongo.uri, or config.mongo.address + config.mongo.username? + config.mongo.password?");
error!(
"config.mongo not configured correctly. must pass either config.mongo.uri, or config.mongo.address + config.mongo.username? + config.mongo.password?"
);
std::process::exit(1)
}
}

View File

@@ -84,8 +84,8 @@ pub struct UpdateGuard<'a, States: Default + Send + 'static>(
&'a Mutex<States>,
);
impl<'a, States: Default + Send + 'static> Drop
for UpdateGuard<'a, States>
impl<States: Default + Send + 'static> Drop
for UpdateGuard<'_, States>
{
fn drop(&mut self) {
let mut lock = match self.0.lock() {

View File

@@ -1,27 +1,27 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use formatting::muted;
use komodo_client::entities::{
Version,
builder::{AwsBuilderConfig, Builder, BuilderConfig},
komodo_timestamp,
server::Server,
server_template::aws::AwsServerTemplateConfig,
update::{Log, Update},
Version,
};
use periphery_client::{
api::{self, GetVersionResponse},
PeripheryClient,
api::{self, GetVersionResponse},
};
use crate::{
cloud::{
aws::ec2::{
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
BuildCleanupData,
aws::ec2::{
Ec2Instance, launch_ec2_instance,
terminate_ec2_instance_with_retry,
},
},
config::core_config,
helpers::update::update_update,

View File

@@ -9,9 +9,9 @@ pub struct Cache<K: PartialEq + Eq + Hash, T: Clone + Default> {
}
impl<
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default,
> Cache<K, T>
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default,
> Cache<K, T>
{
#[instrument(level = "debug", skip(self))]
pub async fn get(&self, key: &K) -> Option<T> {
@@ -70,9 +70,9 @@ impl<
}
impl<
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default + Busy,
> Cache<K, T>
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default + Busy,
> Cache<K, T>
{
#[instrument(level = "debug", skip(self))]
pub async fn busy(&self, id: &K) -> bool {

View File

@@ -1,11 +1,11 @@
use std::sync::OnceLock;
use komodo_client::entities::update::{Update, UpdateListItem};
use tokio::sync::{broadcast, Mutex};
use tokio::sync::{Mutex, broadcast};
/// A channel sending (build_id, update_id)
pub fn build_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
pub fn build_cancel_channel()
-> &'static BroadcastChannel<(String, Update)> {
static BUILD_CANCEL_CHANNEL: OnceLock<
BroadcastChannel<(String, Update)>,
> = OnceLock::new();
@@ -13,8 +13,8 @@ pub fn build_cancel_channel(
}
/// A channel sending (repo_id, update_id)
pub fn repo_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
pub fn repo_cancel_channel()
-> &'static BroadcastChannel<(String, Update)> {
static REPO_CANCEL_CHANNEL: OnceLock<
BroadcastChannel<(String, Update)>,
> = OnceLock::new();

View File

@@ -1,7 +1,7 @@
use std::collections::HashSet;
use anyhow::Context;
use komodo_client::entities::{update::Update, SystemCommand};
use komodo_client::entities::{SystemCommand, update::Update};
use super::query::VariablesAndSecrets;

View File

@@ -1,27 +1,27 @@
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use futures::future::join_all;
use komodo_client::{
api::write::{CreateBuilder, CreateServer},
entities::{
ResourceTarget,
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
komodo_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::Log,
user::{system_user, User},
ResourceTarget,
user::{User, system_user},
},
};
use mongo_indexed::Document;
use mungos::{
find::find_collect,
mongodb::bson::{doc, oid::ObjectId, to_document, Bson},
mongodb::bson::{Bson, doc, oid::ObjectId, to_document},
};
use periphery_client::PeripheryClient;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use rand::Rng;
use resolver_api::Resolve;
use crate::{
@@ -54,8 +54,8 @@ pub fn empty_or_only_spaces(word: &str) -> bool {
}
pub fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
rand::rng()
.sample_iter(&rand::distr::Alphanumeric)
.take(length)
.map(char::from)
.collect()
@@ -208,7 +208,9 @@ pub async fn startup_cleanup() {
async fn startup_in_progress_update_cleanup() {
let log = Log::error(
"Komodo shutdown",
String::from("Komodo shutdown during execution. If this is a build, the builder may not have been terminated.")
String::from(
"Komodo shutdown during execution. If this is a build, the builder may not have been terminated.",
),
);
// This static log won't fail to serialize, unwrap ok.
let log = to_document(&log).unwrap();
@@ -319,7 +321,10 @@ pub async fn ensure_first_server_and_builder() {
{
Ok(server) => server,
Err(e) => {
error!("Failed to initialize 'first_server'. Failed to CreateServer. {:#}", e.error);
error!(
"Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
e.error
);
return;
}
}
@@ -342,6 +347,9 @@ pub async fn ensure_first_server_and_builder() {
})
.await
{
error!("Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}", e.error);
error!(
"Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}",
e.error
);
}
}
}

View File

@@ -1,7 +1,7 @@
use std::time::{Duration, Instant};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use anyhow::{Context, anyhow};
use formatting::{Color, bold, colored, format_serror, muted};
use futures::future::join_all;
use komodo_client::{
api::execute::*,
@@ -25,7 +25,7 @@ use crate::{
execute::{ExecuteArgs, ExecuteRequest},
write::WriteArgs,
},
resource::{list_full_for_user_using_pattern, KomodoResource},
resource::{KomodoResource, list_full_for_user_using_pattern},
state::db_client,
};
@@ -206,7 +206,7 @@ async fn execute_stage(
join_all(futures)
.await
.into_iter()
.collect::<anyhow::Result<_>>()?;
.collect::<anyhow::Result<Vec<_>>>()?;
Ok(())
}
@@ -1259,7 +1259,7 @@ impl ExtendBatch for BatchDeployStack {
fn single_execution(stack: String) -> Execution {
Execution::DeployStack(DeployStack {
stack,
service: None,
services: Vec::new(),
stop_time: None,
})
}
@@ -1280,7 +1280,7 @@ impl ExtendBatch for BatchDestroyStack {
fn single_execution(stack: String) -> Execution {
Execution::DestroyStack(DestroyStack {
stack,
service: None,
services: Vec::new(),
remove_orphans: false,
stop_time: None,
})

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use async_timing_util::{
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS,
ONE_DAY_MS, Timelength, unix_timestamp_ms, wait_until_timelength,
};
use futures::future::join_all;
use mungos::{find::find_collect, mongodb::bson::doc};
@@ -34,8 +34,9 @@ async fn prune_images() -> anyhow::Result<()> {
.await
.context("failed to get servers from db")?
.into_iter()
// This could be done in the mongo query, but rather have rust type system guarantee this.
.filter(|server| server.config.auto_prune)
.filter(|server| {
server.config.enabled && server.config.auto_prune
})
.map(|server| async move {
(
async {

View File

@@ -1,7 +1,8 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
action::Action,
alerter::Alerter,
build::Build,
@@ -17,15 +18,14 @@ use komodo_client::entities::{
sync::ResourceSync,
tag::Tag,
update::Update,
user::{admin_service_user, User},
user::{User, admin_service_user},
user_group::UserGroup,
variable::Variable,
Operation, ResourceTarget, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{
bson::{doc, oid::ObjectId, Document},
bson::{Document, doc, oid::ObjectId},
options::FindOneOptions,
},
};
@@ -359,8 +359,8 @@ pub struct VariablesAndSecrets {
pub secrets: HashMap<String, String>,
}
pub async fn get_variables_and_secrets(
) -> anyhow::Result<VariablesAndSecrets> {
pub async fn get_variables_and_secrets()
-> anyhow::Result<VariablesAndSecrets> {
let variables = find_collect(&db_client().variables, None, None)
.await
.context("failed to get all variables from db")?;

View File

@@ -1,5 +1,6 @@
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
@@ -13,7 +14,6 @@ use komodo_client::entities::{
sync::ResourceSync,
update::{Update, UpdateListItem},
user::User,
Operation, ResourceTarget,
};
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
@@ -263,7 +263,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDeploy(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::PullDeployment(data) => (
Operation::PullDeployment,
@@ -308,7 +308,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDestroyDeployment(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
// Build
@@ -319,7 +319,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchRunBuild(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::CancelBuild(data) => (
Operation::CancelBuild,
@@ -336,7 +336,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchCloneRepo(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::PullRepo(data) => (
Operation::PullRepo,
@@ -345,7 +345,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchPullRepo(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::BuildRepo(data) => (
Operation::BuildRepo,
@@ -354,7 +354,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchBuildRepo(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::CancelRepoBuild(data) => (
Operation::CancelRepoBuild,
@@ -371,7 +371,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchRunProcedure(_) => {
return Ok(Default::default())
return Ok(Default::default());
}
// Action
@@ -382,7 +382,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchRunAction(_) => {
return Ok(Default::default())
return Ok(Default::default());
}
// Server template
@@ -405,7 +405,7 @@ pub async fn init_execution_update(
// Stack
ExecuteRequest::DeployStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::DeployStackService
} else {
Operation::DeployStack
@@ -415,7 +415,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDeployStack(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::DeployStackIfChanged(data) => (
Operation::DeployStack,
@@ -424,10 +424,10 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDeployStackIfChanged(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
ExecuteRequest::StartStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::StartStackService
} else {
Operation::StartStack
@@ -437,7 +437,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::PullStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::PullStackService
} else {
Operation::PullStack
@@ -447,7 +447,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::RestartStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::RestartStackService
} else {
Operation::RestartStack
@@ -457,7 +457,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::PauseStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::PauseStackService
} else {
Operation::PauseStack
@@ -467,7 +467,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::UnpauseStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::UnpauseStackService
} else {
Operation::UnpauseStack
@@ -477,7 +477,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::StopStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::StopStackService
} else {
Operation::StopStack
@@ -487,7 +487,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::DestroyStack(data) => (
if data.service.is_some() {
if !data.services.is_empty() {
Operation::DestroyStackService
} else {
Operation::DestroyStack
@@ -497,7 +497,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDestroyStack(_data) => {
return Ok(Default::default())
return Ok(Default::default());
}
// Alerter

View File

@@ -1,4 +1,4 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use axum::http::HeaderMap;
use hex::ToHex;
use hmac::{Hmac, Mac};

View File

@@ -1,4 +1,4 @@
use anyhow::{anyhow, Context};
use anyhow::{Context, anyhow};
use serde::Deserialize;
use crate::{

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use axum::{http::HeaderMap, Router};
use axum::{Router, http::HeaderMap};
use komodo_client::entities::resource::Resource;
use tokio::sync::Mutex;

View File

@@ -22,7 +22,7 @@ use crate::{
helpers::update::init_execution_update,
};
use super::{ListenerLockCache, ANY_BRANCH};
use super::{ANY_BRANCH, ListenerLockCache};
// =======
// BUILD
@@ -231,7 +231,7 @@ impl StackExecution for DeployStack {
if stack.config.webhook_force_deploy {
let req = ExecuteRequest::DeployStack(DeployStack {
stack: stack.id,
service: None,
services: Vec::new(),
stop_time: None,
});
let update = init_execution_update(&req, &user).await?;

View File

@@ -1,4 +1,4 @@
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
use komodo_client::entities::{
action::Action, build::Build, procedure::Procedure, repo::Repo,
resource::Resource, stack::Stack, sync::ResourceSync,
@@ -11,13 +11,13 @@ use tracing::Instrument;
use crate::resource::KomodoResource;
use super::{
CustomSecret, VerifyBranch, VerifySecret,
resources::{
RepoWebhookOption, StackWebhookOption, SyncWebhookOption,
handle_action_webhook, handle_build_webhook,
handle_procedure_webhook, handle_repo_webhook,
handle_stack_webhook, handle_sync_webhook, RepoWebhookOption,
StackWebhookOption, SyncWebhookOption,
handle_stack_webhook, handle_sync_webhook,
},
CustomSecret, VerifyBranch, VerifySecret,
};
#[derive(Deserialize)]

View File

@@ -40,8 +40,8 @@ async fn app() -> anyhow::Result<()> {
tokio::join!(
// Init db_client check to crash on db init failure
state::init_db_client(),
// Init default OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::init_default_oidc_client()
// Manage OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::spawn_oidc_client_management()
);
tokio::join!(
// Maybe initialize first server
@@ -59,7 +59,6 @@ async fn app() -> anyhow::Result<()> {
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();
resource::spawn_action_state_refresh_loop();
resource::spawn_resource_sync_state_refresh_loop();
helpers::prune::spawn_prune_loop();
// Setup static frontend services

View File

@@ -1,9 +1,9 @@
use std::collections::HashMap;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
deployment::{Deployment, DeploymentState},
ResourceTarget,
};
use crate::{

View File

@@ -30,8 +30,8 @@ pub async fn check_alerts(ts: i64) {
}
#[instrument(level = "debug")]
async fn get_all_servers_map(
) -> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
async fn get_all_servers_map()
-> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
{
let servers = resource::list_full_for_user::<Server>(
ResourceQuery::default(),

View File

@@ -3,10 +3,10 @@ use std::{collections::HashMap, path::PathBuf, str::FromStr};
use anyhow::Context;
use derive_variants::ExtractVariant;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
komodo_timestamp, optional_string,
server::{Server, ServerState},
ResourceTarget,
};
use mongo_indexed::Indexed;
use mungos::{
@@ -85,7 +85,9 @@ pub async fn alert_servers(
id, name, region, ..
} => (id, name, region),
data => {
error!("got incorrect alert data in ServerStatus handler. got {data:?}");
error!(
"got incorrect alert data in ServerStatus handler. got {data:?}"
);
continue;
}
};
@@ -530,8 +532,8 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
}
#[instrument(level = "debug")]
async fn get_open_alerts(
) -> anyhow::Result<(OpenAlertMap, OpenDiskAlertMap)> {
async fn get_open_alerts()
-> anyhow::Result<(OpenAlertMap, OpenDiskAlertMap)> {
let alerts = find_collect(
&db_client().alerts,
doc! { "resolved": false },

View File

@@ -1,9 +1,9 @@
use std::collections::HashMap;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
stack::{Stack, StackState},
ResourceTarget,
};
use crate::{

View File

@@ -6,8 +6,8 @@ use komodo_client::entities::{
stack::ComposeProject,
};
use periphery_client::{
api::{GetDockerLists, GetDockerListsResponse},
PeripheryClient,
api::{GetDockerLists, GetDockerListsResponse},
};
pub async fn get_docker_lists(

View File

@@ -7,8 +7,7 @@ use komodo_client::entities::{
container::ContainerListItem, image::ImageListItem,
network::NetworkListItem, volume::VolumeListItem,
},
komodo_timestamp,
optional_string,
komodo_timestamp, optional_string,
server::{Server, ServerHealth, ServerState},
stack::{ComposeProject, StackService, StackState},
stats::SystemStats,

View File

@@ -1,5 +1,5 @@
use komodo_client::entities::stats::{
sum_disk_usage, SystemStatsRecord, TotalDiskUsage,
SystemStatsRecord, TotalDiskUsage, sum_disk_usage,
};
use crate::state::{db_client, server_status_cache};

View File

@@ -7,6 +7,7 @@ use anyhow::Context;
use komodo_client::{
api::execute::{Deploy, DeployStack},
entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
build::Build,
deployment::{Deployment, DeploymentImage, DeploymentState},
@@ -17,7 +18,6 @@ use komodo_client::{
komodo_timestamp,
stack::{Stack, StackService, StackServiceNames, StackState},
user::auto_redeploy_user,
ResourceTarget,
},
};
@@ -222,8 +222,8 @@ pub async fn update_deployment_cache(
}
/// (StackId, Service)
fn stack_alert_sent_cache(
) -> &'static Mutex<HashSet<(String, String)>> {
fn stack_alert_sent_cache()
-> &'static Mutex<HashSet<(String, String)>> {
static CACHE: OnceLock<Mutex<HashSet<(String, String)>>> =
OnceLock::new();
CACHE.get_or_init(Default::default)
@@ -322,8 +322,8 @@ pub async fn update_stack_cache(
}
}).collect::<Vec<_>>();
let mut update_available = false;
let mut images_with_update = Vec::new();
let mut services_to_update = Vec::new();
for service in services_with_containers.iter() {
if service.update_available {
@@ -336,7 +336,7 @@ pub async fn update_stack_cache(
.map(|c| c.state == ContainerStateStatusEnum::Running)
.unwrap_or_default()
{
update_available = true
services_to_update.push(service.service.clone());
}
}
}
@@ -346,7 +346,7 @@ pub async fn update_stack_cache(
&services,
containers,
);
if update_available
if !services_to_update.is_empty()
&& stack.config.auto_update
&& state == StackState::Running
&& !action_states()
@@ -358,11 +358,16 @@ pub async fn update_stack_cache(
{
let id = stack.id.clone();
let server_name = server_name.clone();
let services = if stack.config.auto_update_all_services {
Vec::new()
} else {
services_to_update
};
tokio::spawn(async move {
match execute::inner_handler(
ExecuteRequest::DeployStack(DeployStack {
stack: stack.name.clone(),
service: None,
services,
stop_time: None,
}),
auto_redeploy_user().to_owned(),

View File

@@ -2,6 +2,7 @@ use std::time::Duration;
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTargetVariant,
action::{
Action, ActionConfig, ActionConfigDiff, ActionInfo,
ActionListItem, ActionListItemInfo, ActionQuerySpecifics,
@@ -10,11 +11,10 @@ use komodo_client::entities::{
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOneOptions, Collection},
mongodb::{Collection, bson::doc, options::FindOneOptions},
};
use crate::state::{action_state_cache, action_states, db_client};

View File

@@ -1,5 +1,6 @@
use derive_variants::ExtractVariant;
use komodo_client::entities::{
Operation, ResourceTargetVariant,
alerter::{
Alerter, AlerterConfig, AlerterConfigDiff, AlerterListItem,
AlerterListItemInfo, AlerterQuerySpecifics, PartialAlerterConfig,
@@ -7,7 +8,6 @@ use komodo_client::entities::{
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::mongodb::Collection;

View File

@@ -2,6 +2,7 @@ use std::time::Duration;
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTargetVariant,
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo, BuildListItem,
BuildListItemInfo, BuildQuerySpecifics, BuildState,
@@ -13,11 +14,10 @@ use komodo_client::entities::{
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions, Collection},
mongodb::{Collection, bson::doc, options::FindOptions},
};
use crate::{

View File

@@ -1,5 +1,6 @@
use anyhow::Context;
use komodo_client::entities::{
MergePartial, Operation, ResourceTargetVariant,
builder::{
Builder, BuilderConfig, BuilderConfigDiff, BuilderConfigVariant,
BuilderListItem, BuilderListItemInfo, BuilderQuerySpecifics,
@@ -10,11 +11,10 @@ use komodo_client::entities::{
server::Server,
update::Update,
user::User,
MergePartial, Operation, ResourceTargetVariant,
};
use mungos::mongodb::{
bson::{doc, to_document, Document},
Collection,
bson::{Document, doc, to_document},
};
use crate::state::db_client;

Some files were not shown because too many files have changed in this diff Show More