Compare commits

..

63 Commits

Author SHA1 Message Date
mbecker20
c4301849ee ResourceSync: pending view toggle between "Execute" vs "Commit" sync direction 2025-03-01 19:31:53 -08:00
mbecker20
b2b934fc7c set branch on git init folder 2025-03-01 13:16:12 -08:00
mbecker20
25e6aca340 init sync file new repo 2025-03-01 12:58:08 -08:00
mbecker20
eb2ca707e2 Stack: Fix git repo new compose file initialization 2025-03-01 11:39:44 -08:00
mbecker20
0380bfb4b0 show provider usernames from config file 2025-03-01 00:31:01 -08:00
mbecker20
a24e164ab7 filters wrap 2025-02-28 21:41:26 -08:00
mbecker20
2889d41441 give server stat charts labels 2025-02-28 21:37:46 -08:00
mbecker20
757ea54f11 improve WriteComposeContentsToHost instrument fields 2025-02-26 14:55:58 -08:00
mbecker20
56d1077c57 ServerTemplate description 2025-02-26 14:55:58 -08:00
mbecker20
767855eb63 dev-3 2025-02-26 14:55:58 -08:00
mbecker20
b31b0bdfc8 use komodo_client.subscribe_to_update_websocket, and click indicator to reconnect 2025-02-26 14:55:58 -08:00
mbecker20
f9be5760f3 Fix unclear ComposePull log re #244 2025-02-26 14:55:58 -08:00
mbecker20
b3f0c45cf9 improve pull to git init on existing folder without .git 2025-02-26 14:55:58 -08:00
unsync
d830fa4816 feature: improve tables quick actions on mobile (#312)
* feature: improve tables quick actions on mobile

* review: fix gap4

* review: use flex-wrap
2025-02-26 14:55:58 -08:00
mbecker20
96dcae141b choose which stack services to include in logs 2025-02-26 14:55:58 -08:00
mbecker20
a5542616c8 fix api name change 2025-02-26 14:55:58 -08:00
mbecker20
4f1295322f 1.17.0-dev-2 2025-02-26 14:55:58 -08:00
mbecker20
51e95599ed Add all services stack log 2025-02-26 14:55:58 -08:00
mbecker20
63320fe58b improve update indicator style and also put on home screen 2025-02-26 14:55:58 -08:00
mbecker20
371cb7250f requery alerts more often 2025-02-26 14:55:58 -08:00
mbecker20
55f906ebbb Fix PullStack re #302 and record docker compose config on stack deploy 2025-02-26 14:55:58 -08:00
mbecker20
3f29f62ec3 improve First Login docs 2025-02-26 14:55:58 -08:00
unsync
18e587e4fc feature: allow docker image text to overflow in table (#301)
* feature: allow docker image text to overflow in table

* review: use break-words

* wip: revert line break in css file

* feature: update devcontainer node release
2025-02-26 14:55:58 -08:00
mbecker20
f4052c11d9 add save button to config bottom 2025-02-26 14:55:58 -08:00
mbecker20
56bbae49ff add config save button in desktop sidebar navigator 2025-02-26 14:55:58 -08:00
mbecker20
c77c1a9188 add donate button docsite 2025-02-26 14:55:58 -08:00
mbecker20
b763db7dab typescript subscribe_to_update_websocket 2025-02-26 14:55:58 -08:00
mbecker20
1d242039e2 docs new organization 2025-02-26 14:55:58 -08:00
mbecker20
03659317ed fix new compose images 2025-02-26 14:55:58 -08:00
mbecker20
3945f2f17e more legible favicon 2025-02-26 14:55:58 -08:00
mbecker20
579217fe77 fix login screen logo 2025-02-26 14:55:58 -08:00
mbecker20
19410a9b41 dev-1 2025-02-26 14:55:58 -08:00
mbecker20
0af88f18db remove example from cargo toml workspace 2025-02-26 14:55:58 -08:00
mbecker20
5011adbc83 mbecker20 -> moghtech 2025-02-26 14:55:58 -08:00
Maxwell Becker
70211e4159 Remove .git from remote_url (#299)
Remove .git from remote_url

Co-authored-by: Deon Marshall <dmarshall@ccp.com.au>
2025-02-26 14:55:58 -08:00
unsync
324cd508b3 feature: interpolate secrets in custom alerter (#289)
* feature: interpolate secrets in custom alerter

* fix rust warning

* review: sanitize errors

* review: sanitize error message
2025-02-26 14:55:58 -08:00
unsync
fcfc4fdc84 feature: add post_deploy command (#288)
* feature: add post_deploy command

* review: do not run post_deploy if deploy failed
2025-02-26 14:55:58 -08:00
mbecker20
4900b2116e 1.17.0-dev 2025-02-26 14:55:58 -08:00
unsync
b1e88a9bb1 feature: use the repo path instead of name in GetLatestCommit (#282)
* Update repo path handling in commit fetching

- Changed `name` to `path` for repository identification.
- Updated cache update function to use the new path field.
- Improved error message for non-directory repo paths.

* feat: use optional name and path in GetLatestCommit

* review: don't use optional for name

* review: use helper

* review: remove redundant to_string()
2025-02-26 14:55:58 -08:00
mbecker20
ca050dd50a update available deployment table 2025-02-26 14:55:58 -08:00
mbecker20
cac877e6bb show update available stack table 2025-02-26 14:55:58 -08:00
mbecker20
4af8a4a673 finish oidc comment 2025-02-26 14:55:58 -08:00
mbecker20
a3e4bd5cf2 clean up rust client websocket subscription 2025-02-26 14:55:58 -08:00
mbecker20
c718ee0d2c escape incoming sync backslashes (BREAKING) 2025-02-26 14:55:58 -08:00
mbecker20
990bef003a rename Test Alerter button 2025-02-26 14:55:58 -08:00
mbecker20
3e8d6e401b simplify network stats 2025-02-26 14:55:58 -08:00
mbecker20
7486e7466b komodo-logo 2025-02-26 14:55:58 -08:00
mbecker20
712270d281 higher quality / colored icons 2025-02-26 14:55:58 -08:00
mbecker20
d970d6d764 Add test alerter button 2025-02-26 14:55:58 -08:00
mbecker20
8bccadca08 fix last axum updates 2025-02-26 14:55:58 -08:00
mbecker20
b63f4cd972 axum update :param to {param} syntax 2025-02-26 14:55:58 -08:00
mbecker20
0757f14bcb rust 1.84.0 2025-02-26 14:55:58 -08:00
mbecker20
b7e6f033a6 test alert implementation 2025-02-26 14:55:58 -08:00
mbecker20
f47f729c71 add entities / message for test alerter 2025-02-26 14:55:58 -08:00
mbecker20
68b6dc62f6 the komodo env file should be highest priority over additional files 2025-02-26 14:55:58 -08:00
mbecker20
d6f5723755 clean up cors 2025-02-26 14:55:58 -08:00
mbecker20
5bff6e8cb9 just make it 1.17.0 2025-02-26 14:55:58 -08:00
mbecker20
7ae78c0eba bump aws deps 2025-02-26 14:55:58 -08:00
mbecker20
6293e1723b axum to 0.8 2025-02-26 14:55:58 -08:00
mbecker20
f0ad42f140 resource2 not really a benefit 2025-02-26 14:55:58 -08:00
mbecker20
ea4cd34d2a format 2025-02-26 14:55:58 -08:00
mbecker20
76b9f06709 fmt 2025-02-26 14:55:58 -08:00
mbecker20
f283919d56 resolver v3
add new ec2 instance types

clean up testing config

document the libraries a bit

clean up main

update sysinfo and otel

update client resolver 3.0

resolver v3 prog

clean up gitignore

implement periphery resolver v3

clean up

core read api v3

more prog

execute api

missing apis

compiling

1.16.13

work on more granular traits

prog on crud
2025-02-26 14:55:58 -08:00
259 changed files with 3206 additions and 4522 deletions

View File

@@ -23,7 +23,7 @@ services:
db:
extends:
file: ../dev.compose.yaml
file: ../test.compose.yaml
service: ferretdb
volumes:

867
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -8,8 +8,8 @@ members = [
]
[workspace.package]
version = "1.17.0-dev-7"
edition = "2024"
version = "1.17.0-dev-3"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/moghtech/komodo"
@@ -40,62 +40,61 @@ derive_variants = "1.0.0"
mongo_indexed = "2.0.1"
resolver_api = "3.0.0"
toml_pretty = "1.1.2"
mungos = "3.2.0"
mungos = "1.1.0"
svi = "1.0.1"
# ASYNC
reqwest = { version = "0.12.12", default-features = false, features = ["json", "rustls-tls-native-roots"] }
tokio = { version = "1.44.0", features = ["full"] }
reqwest = { version = "0.12.12", default-features = false, features = ["json", "rustls-tls"] }
tokio = { version = "1.43.0", features = ["full"] }
tokio-util = "0.7.13"
futures = "0.3.31"
futures-util = "0.3.31"
arc-swap = "1.7.1"
# SERVER
axum-extra = { version = "0.10.0", features = ["typed-header"] }
tower-http = { version = "0.6.2", features = ["fs", "cors"] }
axum-server = { version = "0.7.1", features = ["tls-rustls"] }
axum = { version = "0.8.1", features = ["ws", "json", "macros"] }
tokio-tungstenite = "0.26.2"
tokio-tungstenite = "0.26.1"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.219", features = ["derive"] }
strum = { version = "0.27.1", features = ["derive"] }
serde_json = "1.0.140"
serde = { version = "1.0.217", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.135"
serde_yaml = "0.9.34"
toml = "0.8.20"
toml = "0.8.19"
# ERROR
anyhow = "1.0.97"
thiserror = "2.0.12"
anyhow = "1.0.95"
thiserror = "2.0.11"
# LOGGING
opentelemetry-otlp = { version = "0.28.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.28.0", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.27.0", features = ["tls-roots", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.27.1", features = ["rt-tokio"] }
tracing-subscriber = { version = "0.3.19", features = ["json"] }
opentelemetry-semantic-conventions = "0.28.0"
tracing-opentelemetry = "0.29.0"
opentelemetry = "0.28.0"
opentelemetry-semantic-conventions = "0.27.0"
tracing-opentelemetry = "0.28.0"
opentelemetry = "0.27.1"
tracing = "0.1.41"
# CONFIG
clap = { version = "4.5.32", features = ["derive"] }
clap = { version = "4.5.26", features = ["derive"] }
dotenvy = "0.15.7"
envy = "0.4.2"
# CRYPTO / AUTH
uuid = { version = "1.15.1", features = ["v4", "fast-rng", "serde"] }
jsonwebtoken = { version = "9.3.1", default-features = false }
openidconnect = "4.0.0"
uuid = { version = "1.12.0", features = ["v4", "fast-rng", "serde"] }
openidconnect = "3.5.0"
urlencoding = "2.1.3"
nom_pem = "4.0.0"
bcrypt = "0.17.0"
bcrypt = "0.16.0"
base64 = "0.22.1"
rustls = "0.23.23"
rustls = "0.23.21"
hmac = "0.12.1"
sha2 = "0.10.8"
rand = "0.9.0"
rand = "0.8.5"
jwt = "0.16.0"
hex = "0.4.3"
# SYSTEM
@@ -103,9 +102,8 @@ bollard = "0.18.1"
sysinfo = "0.33.1"
# CLOUD
aws-config = "1.6.0"
aws-sdk-ec2 = "1.117.0"
aws-credential-types = "1.2.2"
aws-config = "1.5.13"
aws-sdk-ec2 = "1.101.0"
# MISC
derive_builder = "0.20.2"

View File

@@ -1,7 +1,7 @@
## Builds the Komodo Core and Periphery binaries
## for a specific architecture.
FROM rust:1.85.0-bullseye AS builder
FROM rust:1.84.1-bullseye AS builder
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./

View File

@@ -37,10 +37,9 @@ mungos.workspace = true
slack.workspace = true
svi.workspace = true
# external
aws-credential-types.workspace = true
axum-server.workspace = true
ordered_hash_map.workspace = true
openidconnect.workspace = true
axum-server.workspace = true
urlencoding.workspace = true
aws-sdk-ec2.workspace = true
aws-config.workspace = true
@@ -52,7 +51,6 @@ serde_yaml.workspace = true
typeshare.workspace = true
octorust.workspace = true
wildcard.workspace = true
arc-swap.workspace = true
dashmap.workspace = true
tracing.workspace = true
reqwest.workspace = true
@@ -73,5 +71,5 @@ envy.workspace = true
rand.workspace = true
hmac.workspace = true
sha2.workspace = true
jsonwebtoken.workspace = true
jwt.workspace = true
hex.workspace = true

View File

@@ -1,7 +1,7 @@
## All in one, multi stage compile + runtime Docker build for your architecture.
# Build Core
FROM rust:1.85.0-bullseye AS core-builder
FROM rust:1.84.1-bullseye AS core-builder
WORKDIR /builder
COPY Cargo.toml Cargo.lock ./

View File

@@ -94,9 +94,7 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
let to = fmt_docker_container_state(to);
format!(
"📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
)
format!("📦 Deployment **{name}** is now **{to}**\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
}
AlertData::DeploymentImageUpdateAvailable {
id,
@@ -106,9 +104,7 @@ pub async fn send_alert(
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}"
)
format!("⬆ Deployment **{name}** has an update available\nserver: **{server_name}**\nimage: **{image}**\n{link}")
}
AlertData::DeploymentAutoUpdated {
id,
@@ -118,9 +114,7 @@ pub async fn send_alert(
image,
} => {
let link = resource_link(ResourceTargetVariant::Deployment, id);
format!(
"⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}"
)
format!("⬆ Deployment **{name}** was updated automatically ⏫\nserver: **{server_name}**\nimage: **{image}**\n{link}")
}
AlertData::StackStateChange {
id,
@@ -132,9 +126,7 @@ pub async fn send_alert(
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
let to = fmt_stack_state(to);
format!(
"🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}"
)
format!("🥞 Stack **{name}** is now {to}\nserver: **{server_name}**\nprevious: **{from}**\n{link}")
}
AlertData::StackImageUpdateAvailable {
id,
@@ -145,9 +137,7 @@ pub async fn send_alert(
image,
} => {
let link = resource_link(ResourceTargetVariant::Stack, id);
format!(
"⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}"
)
format!("⬆ Stack **{name}** has an update available\nserver: **{server_name}**\nservice: **{service}**\nimage: **{image}**\n{link}")
}
AlertData::StackAutoUpdated {
id,
@@ -160,17 +150,13 @@ pub async fn send_alert(
let images_label =
if images.len() > 1 { "images" } else { "image" };
let images = images.join(", ");
format!(
"⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}"
)
format!("⬆ Stack **{name}** was updated automatically ⏫\nserver: **{server_name}**\n{images_label}: **{images}**\n{link}")
}
AlertData::AwsBuilderTerminationFailed {
instance_id,
message,
} => {
format!(
"{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}"
)
format!("{level} | Failed to terminated AWS builder instance\ninstance id: **{instance_id}**\n{message}")
}
AlertData::ResourceSyncPendingUpdates { id, name } => {
let link =
@@ -181,9 +167,7 @@ pub async fn send_alert(
}
AlertData::BuildFailed { id, name, version } => {
let link = resource_link(ResourceTargetVariant::Build, id);
format!(
"{level} | Build **{name}** failed\nversion: **v{version}**\n{link}"
)
format!("{level} | Build **{name}** failed\nversion: **v{version}**\n{link}")
}
AlertData::RepoBuildFailed { id, name } => {
let link = resource_link(ResourceTargetVariant::Repo, id);

View File

@@ -1,21 +1,21 @@
use std::collections::HashSet;
use ::slack::types::Block;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use komodo_client::entities::{
ResourceTargetVariant,
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
alerter::*,
deployment::DeploymentState,
stack::StackState,
ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use std::collections::HashSet;
use tracing::Instrument;
use crate::{config::core_config, state::db_client};
use crate::helpers::interpolate::interpolate_variables_secrets_into_string;
use crate::helpers::query::get_variables_and_secrets;
use crate::{config::core_config, state::db_client};
mod discord;
mod slack;
@@ -136,6 +136,7 @@ async fn send_custom_alert(
url: &str,
alert: &Alert,
) -> anyhow::Result<()> {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
@@ -155,14 +156,9 @@ async fn send_custom_alert(
.send()
.await
.map_err(|e| {
let replacers =
secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error =
svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!(
"Error with request: {}",
sanitized_error
))
let replacers = secret_replacers.into_iter().collect::<Vec<_>>();
let sanitized_error = svi::replace_in_string(&format!("{e:?}"), &replacers);
anyhow::Error::msg(format!("Error with request: {}", sanitized_error))
})
.context("failed at post request to alerter")?;
let status = res.status();

View File

@@ -73,9 +73,7 @@ pub async fn send_alert(
let region = fmt_region(region);
match alert.level {
SeverityLevel::Ok => {
let text = format!(
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%*"
);
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%*");
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -89,9 +87,7 @@ pub async fn send_alert(
(text, blocks.into())
}
_ => {
let text = format!(
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈"
);
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈");
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -117,9 +113,7 @@ pub async fn send_alert(
let percentage = 100.0 * used_gb / total_gb;
match alert.level {
SeverityLevel::Ok => {
let text = format!(
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾"
);
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -136,9 +130,7 @@ pub async fn send_alert(
(text, blocks.into())
}
_ => {
let text = format!(
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾"
);
let text = format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾");
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -168,9 +160,7 @@ pub async fn send_alert(
let percentage = 100.0 * used_gb / total_gb;
match alert.level {
SeverityLevel::Ok => {
let text = format!(
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿"
);
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -179,17 +169,12 @@ pub async fn send_alert(
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}
_ => {
let text = format!(
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿"
);
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿");
let blocks = vec![
Block::header(level),
Block::section(format!(
@@ -198,10 +183,7 @@ pub async fn send_alert(
Block::section(format!(
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
)),
Block::section(resource_link(
ResourceTargetVariant::Server,
id,
)),
Block::section(resource_link(ResourceTargetVariant::Server, id)),
];
(text, blocks.into())
}

View File

@@ -1,6 +1,6 @@
use std::{sync::OnceLock, time::Instant};
use axum::{Router, http::HeaderMap, routing::post};
use axum::{http::HeaderMap, routing::post, Router};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::auth::*, entities::user::User};
use resolver_api::Resolve;
@@ -105,7 +105,8 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
&& !config.google_oauth.secret.is_empty(),
oidc: config.oidc_enabled
&& !config.oidc_provider.is_empty()
&& !config.oidc_client_id.is_empty(),
&& !config.oidc_client_id.is_empty()
&& !config.oidc_client_secret.is_empty(),
registration_disabled: config.disable_user_registration,
}
})

View File

@@ -67,7 +67,7 @@ impl Resolve<ExecuteArgs> for RunAction {
) -> serror::Result<Update> {
let mut action = resource::get_check_permissions::<Action>(
&self.action,
user,
&user,
PermissionLevel::Execute,
)
.await?;
@@ -111,31 +111,19 @@ impl Resolve<ExecuteArgs> for RunAction {
let path = core_config().action_directory.join(&file);
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize Action file parent directory {parent:?}"))?;
let _ = fs::create_dir_all(parent).await;
}
fs::write(&path, contents).await.with_context(|| {
format!("Failed to write action file to {path:?}")
})?;
let CoreConfig { ssl_enabled, .. } = core_config();
let https_cert_flag = if *ssl_enabled {
" --unsafely-ignore-certificate-errors=localhost"
} else {
""
};
let mut res = run_komodo_command(
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
"Execute Action",
None,
format!(
"deno run --allow-all{https_cert_flag} {}",
path.display()
),
format!("deno run --allow-all {}", path.display()),
false,
)
.await;
@@ -317,8 +305,8 @@ fn delete_file(
if name == file {
if let Err(e) = fs::remove_file(entry.path()).await {
warn!(
"Failed to clean up generated file after action execution | {e:#}"
);
"Failed to clean up generated file after action execution | {e:#}"
);
};
return true;
}

View File

@@ -1,6 +1,6 @@
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
@@ -82,7 +82,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
) -> serror::Result<Update> {
let mut build = resource::get_check_permissions::<Build>(
&self.build,
user,
&user,
PermissionLevel::Execute,
)
.await?;
@@ -517,7 +517,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
) -> serror::Result<Update> {
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
&user,
PermissionLevel::Execute,
)
.await?;
@@ -560,9 +560,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
)
.await
{
warn!(
"failed to set CancelBuild Update status Complete after timeout | {e:#}"
)
warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}")
}
});

View File

@@ -1,21 +1,21 @@
use std::{collections::HashSet, sync::OnceLock};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use cache::TimeoutCache;
use formatting::format_serror;
use komodo_client::{
api::execute::*,
entities::{
Version,
build::{Build, ImageRegistryConfig},
deployment::{
Deployment, DeploymentImage, extract_registry_domain,
extract_registry_domain, Deployment, DeploymentImage,
},
get_image_name, komodo_timestamp, optional_string,
permission::PermissionLevel,
server::Server,
update::{Log, Update},
user::User,
Version,
},
};
use periphery_client::api;
@@ -561,7 +561,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
setup_deployment_execution(&self.deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()
@@ -610,7 +610,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
ExecuteArgs { user, update }: &ExecuteArgs,
) -> serror::Result<Update> {
let (deployment, server) =
setup_deployment_execution(&self.deployment, user).await?;
setup_deployment_execution(&self.deployment, &user).await?;
// get the action state for the deployment (or insert default).
let action_state = action_states()

View File

@@ -1,17 +1,17 @@
use std::{pin::Pin, time::Instant};
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use axum_extra::{TypedHeader, headers::ContentType};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use derive_variants::{EnumVariants, ExtractVariant};
use formatting::format_serror;
use futures::future::join_all;
use komodo_client::{
api::execute::*,
entities::{
Operation,
update::{Log, Update},
user::User,
Operation,
},
};
use mungos::by_id::find_one_by_id;
@@ -25,7 +25,7 @@ use uuid::Uuid;
use crate::{
auth::auth_request,
helpers::update::{init_execution_update, update_update},
resource::{KomodoResource, list_full_for_user_using_pattern},
resource::{list_full_for_user_using_pattern, KomodoResource},
state::db_client,
};

View File

@@ -1,6 +1,6 @@
use std::pin::Pin;
use formatting::{Color, bold, colored, format_serror, muted};
use formatting::{bold, colored, format_serror, muted, Color};
use komodo_client::{
api::execute::{
BatchExecutionResponse, BatchRunProcedure, RunProcedure,

View File

@@ -1,6 +1,6 @@
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::{execute::*, write::RefreshRepoCache},
@@ -173,7 +173,7 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
ExecuteArgs { user, .. }: &ExecuteArgs,
) -> serror::Result<BatchExecutionResponse> {
Ok(
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
super::batch_execute::<BatchPullRepo>(&self.pattern, &user)
.await?,
)
}
@@ -187,7 +187,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
) -> serror::Result<Update> {
let mut repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
&user,
PermissionLevel::Execute,
)
.await?;
@@ -438,7 +438,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
return handle_builder_early_return(
update, repo.id, repo.name, false,
)
.await;
.await
.map_err(Into::into);
}
};
@@ -695,9 +696,7 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
)
.await
{
warn!(
"failed to set CancelRepoBuild Update status Complete after timeout | {e:#}"
)
warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}")
}
});

View File

@@ -1,4 +1,4 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::{execute::LaunchServer, write::CreateServer},

View File

@@ -41,7 +41,7 @@ impl super::BatchExecute for BatchDeployStack {
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DeployStack(DeployStack {
stack,
services: Vec::new(),
service: None,
stop_time: None,
})
}
@@ -87,13 +87,10 @@ impl Resolve<ExecuteArgs> for DeployStack {
update_update(update.clone()).await?;
if !self.services.is_empty() {
if let Some(service) = &self.service {
update.logs.push(Log::simple(
"Service/s",
format!(
"Execution requested for Stack service/s {}",
self.services.join(", ")
),
&format!("Service: {service}"),
format!("Execution requested for Stack service {service}"),
))
}
@@ -186,7 +183,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
} = periphery_client(&server)?
.request(ComposeUp {
stack: stack.clone(),
services: self.services,
service: self.service,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
@@ -374,7 +371,7 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
DeployStack {
stack: stack.name,
services: Vec::new(),
service: None,
stop_time: self.stop_time,
}
.resolve(&ExecuteArgs {
@@ -387,20 +384,15 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
pub async fn pull_stack_inner(
mut stack: Stack,
services: Vec<String>,
service: Option<String>,
server: &Server,
mut update: Option<&mut Update>,
) -> anyhow::Result<ComposePullResponse> {
if let Some(update) = update.as_mut() {
if !services.is_empty() {
update.logs.push(Log::simple(
"Service/s",
format!(
"Execution requested for Stack service/s {}",
services.join(", ")
),
))
}
if let (Some(service), Some(update)) = (&service, update.as_mut()) {
update.logs.push(Log::simple(
&format!("Service: {service}"),
format!("Execution requested for Stack service {service}"),
))
}
let git_token = crate::helpers::git_token(
@@ -451,7 +443,7 @@ pub async fn pull_stack_inner(
let res = periphery_client(server)?
.request(ComposePull {
stack,
services,
service,
git_token,
registry_token,
})
@@ -491,7 +483,7 @@ impl Resolve<ExecuteArgs> for PullStack {
let res = pull_stack_inner(
stack,
self.services,
self.service,
&server,
Some(&mut update),
)
@@ -513,7 +505,7 @@ impl Resolve<ExecuteArgs> for StartStack {
) -> serror::Result<Update> {
execute_compose::<StartStack>(
&self.stack,
self.services,
self.service,
user,
|state| state.starting = true,
update.clone(),
@@ -532,7 +524,7 @@ impl Resolve<ExecuteArgs> for RestartStack {
) -> serror::Result<Update> {
execute_compose::<RestartStack>(
&self.stack,
self.services,
self.service,
user,
|state| {
state.restarting = true;
@@ -553,7 +545,7 @@ impl Resolve<ExecuteArgs> for PauseStack {
) -> serror::Result<Update> {
execute_compose::<PauseStack>(
&self.stack,
self.services,
self.service,
user,
|state| state.pausing = true,
update.clone(),
@@ -572,7 +564,7 @@ impl Resolve<ExecuteArgs> for UnpauseStack {
) -> serror::Result<Update> {
execute_compose::<UnpauseStack>(
&self.stack,
self.services,
self.service,
user,
|state| state.unpausing = true,
update.clone(),
@@ -591,7 +583,7 @@ impl Resolve<ExecuteArgs> for StopStack {
) -> serror::Result<Update> {
execute_compose::<StopStack>(
&self.stack,
self.services,
self.service,
user,
|state| state.stopping = true,
update.clone(),
@@ -607,7 +599,7 @@ impl super::BatchExecute for BatchDestroyStack {
fn single_request(stack: String) -> ExecuteRequest {
ExecuteRequest::DestroyStack(DestroyStack {
stack,
services: Vec::new(),
service: None,
remove_orphans: false,
stop_time: None,
})
@@ -634,7 +626,7 @@ impl Resolve<ExecuteArgs> for DestroyStack {
) -> serror::Result<Update> {
execute_compose::<DestroyStack>(
&self.stack,
self.services,
self.service,
user,
|state| state.destroying = true,
update.clone(),

View File

@@ -1,11 +1,11 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use formatting::{Color, colored, format_serror};
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self, ResourceTargetVariant,
self,
action::Action,
alerter::Alerter,
build::Build,
@@ -21,24 +21,28 @@ use komodo_client::{
sync::ResourceSync,
update::{Log, Update},
user::sync_user,
ResourceTargetVariant,
},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::oid::ObjectId};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
api::write::WriteArgs,
helpers::{query::get_id_to_tags, update::update_update},
resource,
resource::{self, refresh_resource_sync_state_cache},
state::{action_states, db_client},
sync::{
AllResourcesById, ResourceSyncTrait,
deploy::{
SyncDeployParams, build_deploy_cache, deploy_from_cache,
build_deploy_cache, deploy_from_cache, SyncDeployParams,
},
execute::{ExecuteResourceSync, get_updates_for_execution},
execute::{get_updates_for_execution, ExecuteResourceSync},
remote::RemoteResources,
AllResourcesById, ResourceSyncTrait,
},
};
@@ -57,7 +61,7 @@ impl Resolve<ExecuteArgs> for RunSync {
} = self;
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&sync, user, PermissionLevel::Execute)
>(&sync, &user, PermissionLevel::Execute)
.await?;
// get the action state for the sync (or insert default).
@@ -206,7 +210,7 @@ impl Resolve<ExecuteArgs> for RunSync {
let delete = sync.config.managed || sync.config.delete;
let server_deltas = if sync.config.include_resources {
let (servers_to_create, servers_to_update, servers_to_delete) =
get_updates_for_execution::<Server>(
resources.servers,
delete,
@@ -216,11 +220,22 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let stack_deltas = if sync.config.include_resources {
.await?;
let (
deployments_to_create,
deployments_to_update,
deployments_to_delete,
) = get_updates_for_execution::<Deployment>(
resources.deployments,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (stacks_to_create, stacks_to_update, stacks_to_delete) =
get_updates_for_execution::<Stack>(
resources.stacks,
delete,
@@ -230,25 +245,8 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let deployment_deltas = if sync.config.include_resources {
get_updates_for_execution::<Deployment>(
resources.deployments,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let build_deltas = if sync.config.include_resources {
.await?;
let (builds_to_create, builds_to_update, builds_to_delete) =
get_updates_for_execution::<Build>(
resources.builds,
delete,
@@ -258,11 +256,8 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let repo_deltas = if sync.config.include_resources {
.await?;
let (repos_to_create, repos_to_update, repos_to_delete) =
get_updates_for_execution::<Repo>(
resources.repos,
delete,
@@ -272,25 +267,22 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let procedure_deltas = if sync.config.include_resources {
get_updates_for_execution::<Procedure>(
resources.procedures,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let action_deltas = if sync.config.include_resources {
.await?;
let (
procedures_to_create,
procedures_to_update,
procedures_to_delete,
) = get_updates_for_execution::<Procedure>(
resources.procedures,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (actions_to_create, actions_to_update, actions_to_delete) =
get_updates_for_execution::<Action>(
resources.actions,
delete,
@@ -300,11 +292,8 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let builder_deltas = if sync.config.include_resources {
.await?;
let (builders_to_create, builders_to_update, builders_to_delete) =
get_updates_for_execution::<Builder>(
resources.builders,
delete,
@@ -314,11 +303,8 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let alerter_deltas = if sync.config.include_resources {
.await?;
let (alerters_to_create, alerters_to_update, alerters_to_delete) =
get_updates_for_execution::<Alerter>(
resources.alerters,
delete,
@@ -328,38 +314,35 @@ impl Resolve<ExecuteArgs> for RunSync {
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let server_template_deltas = if sync.config.include_resources {
get_updates_for_execution::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
let resource_sync_deltas = if sync.config.include_resources {
get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?
} else {
Default::default()
};
.await?;
let (
server_templates_to_create,
server_templates_to_update,
server_templates_to_delete,
) = get_updates_for_execution::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
resource_syncs_to_create,
resource_syncs_to_update,
resource_syncs_to_delete,
) = get_updates_for_execution::<entities::sync::ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
variables_to_create,
@@ -367,11 +350,12 @@ impl Resolve<ExecuteArgs> for RunSync {
variables_to_delete,
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.include_variables
&& sync.config.match_tags.is_empty()
{
crate::sync::variables::get_updates_for_execution(
resources.variables,
delete,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
)
.await?
} else {
@@ -383,11 +367,12 @@ impl Resolve<ExecuteArgs> for RunSync {
user_groups_to_delete,
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.include_user_groups
&& sync.config.match_tags.is_empty()
{
crate::sync::user_groups::get_updates_for_execution(
resources.user_groups,
delete,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
&all_resources,
)
.await?
@@ -396,17 +381,39 @@ impl Resolve<ExecuteArgs> for RunSync {
};
if deploy_cache.is_empty()
&& resource_sync_deltas.no_changes()
&& server_template_deltas.no_changes()
&& server_deltas.no_changes()
&& deployment_deltas.no_changes()
&& stack_deltas.no_changes()
&& build_deltas.no_changes()
&& builder_deltas.no_changes()
&& alerter_deltas.no_changes()
&& repo_deltas.no_changes()
&& procedure_deltas.no_changes()
&& action_deltas.no_changes()
&& resource_syncs_to_create.is_empty()
&& resource_syncs_to_update.is_empty()
&& resource_syncs_to_delete.is_empty()
&& server_templates_to_create.is_empty()
&& server_templates_to_update.is_empty()
&& server_templates_to_delete.is_empty()
&& servers_to_create.is_empty()
&& servers_to_update.is_empty()
&& servers_to_delete.is_empty()
&& deployments_to_create.is_empty()
&& deployments_to_update.is_empty()
&& deployments_to_delete.is_empty()
&& stacks_to_create.is_empty()
&& stacks_to_update.is_empty()
&& stacks_to_delete.is_empty()
&& builds_to_create.is_empty()
&& builds_to_update.is_empty()
&& builds_to_delete.is_empty()
&& builders_to_create.is_empty()
&& builders_to_update.is_empty()
&& builders_to_delete.is_empty()
&& alerters_to_create.is_empty()
&& alerters_to_update.is_empty()
&& alerters_to_delete.is_empty()
&& repos_to_create.is_empty()
&& repos_to_update.is_empty()
&& repos_to_delete.is_empty()
&& procedures_to_create.is_empty()
&& procedures_to_update.is_empty()
&& procedures_to_delete.is_empty()
&& actions_to_create.is_empty()
&& actions_to_update.is_empty()
&& actions_to_delete.is_empty()
&& user_groups_to_create.is_empty()
&& user_groups_to_update.is_empty()
&& user_groups_to_delete.is_empty()
@@ -449,57 +456,111 @@ impl Resolve<ExecuteArgs> for RunSync {
);
maybe_extend(
&mut update.logs,
ResourceSync::execute_sync_updates(resource_sync_deltas).await,
ResourceSync::execute_sync_updates(
resource_syncs_to_create,
resource_syncs_to_update,
resource_syncs_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
ServerTemplate::execute_sync_updates(server_template_deltas)
.await,
ServerTemplate::execute_sync_updates(
server_templates_to_create,
server_templates_to_update,
server_templates_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Server::execute_sync_updates(server_deltas).await,
Server::execute_sync_updates(
servers_to_create,
servers_to_update,
servers_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Alerter::execute_sync_updates(alerter_deltas).await,
Alerter::execute_sync_updates(
alerters_to_create,
alerters_to_update,
alerters_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Action::execute_sync_updates(action_deltas).await,
Action::execute_sync_updates(
actions_to_create,
actions_to_update,
actions_to_delete,
)
.await,
);
// Dependent on server
maybe_extend(
&mut update.logs,
Builder::execute_sync_updates(builder_deltas).await,
Builder::execute_sync_updates(
builders_to_create,
builders_to_update,
builders_to_delete,
)
.await,
);
maybe_extend(
&mut update.logs,
Repo::execute_sync_updates(repo_deltas).await,
Repo::execute_sync_updates(
repos_to_create,
repos_to_update,
repos_to_delete,
)
.await,
);
// Dependant on builder
maybe_extend(
&mut update.logs,
Build::execute_sync_updates(build_deltas).await,
Build::execute_sync_updates(
builds_to_create,
builds_to_update,
builds_to_delete,
)
.await,
);
// Dependant on server / build
maybe_extend(
&mut update.logs,
Deployment::execute_sync_updates(deployment_deltas).await,
Deployment::execute_sync_updates(
deployments_to_create,
deployments_to_update,
deployments_to_delete,
)
.await,
);
// stack only depends on server, but maybe will depend on build later.
maybe_extend(
&mut update.logs,
Stack::execute_sync_updates(stack_deltas).await,
Stack::execute_sync_updates(
stacks_to_create,
stacks_to_update,
stacks_to_delete,
)
.await,
);
// Dependant on everything
maybe_extend(
&mut update.logs,
Procedure::execute_sync_updates(procedure_deltas).await,
Procedure::execute_sync_updates(
procedures_to_create,
procedures_to_update,
procedures_to_delete,
)
.await,
);
// Execute the deploy cache
@@ -548,6 +609,21 @@ impl Resolve<ExecuteArgs> for RunSync {
}
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db.updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_resource_sync_state_cache().await;
}
update_update(update.clone()).await?;
Ok(update)

View File

@@ -45,7 +45,7 @@ impl Resolve<ReadArgs> for ListActions {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Action>(self.query, user, &all_tags)
resource::list_for_user::<Action>(self.query, &user, &all_tags)
.await?,
)
}
@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListFullActions {
};
Ok(
resource::list_full_for_user::<Action>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)
@@ -77,7 +77,7 @@ impl Resolve<ReadArgs> for GetActionActionState {
) -> serror::Result<ActionActionState> {
let action = resource::get_check_permissions::<Action>(
&self.action,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -98,7 +98,7 @@ impl Resolve<ReadArgs> for GetActionsSummary {
) -> serror::Result<GetActionsSummaryResponse> {
let actions = resource::list_full_for_user::<Action>(
Default::default(),
user,
&user,
&[],
)
.await

View File

@@ -61,7 +61,7 @@ impl Resolve<ReadArgs> for ListFullAlerters {
};
Ok(
resource::list_full_for_user::<Alerter>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)
@@ -75,7 +75,7 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
) -> serror::Result<GetAlertersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Alerter,
>(user)
>(&user)
.await?
{
Some(ids) => doc! {

View File

@@ -6,11 +6,11 @@ use futures::TryStreamExt;
use komodo_client::{
api::read::*,
entities::{
Operation,
build::{Build, BuildActionState, BuildListItem, BuildState},
config::core::CoreConfig,
permission::PermissionLevel,
update::UpdateStatus,
Operation,
},
};
use mungos::{

View File

@@ -75,7 +75,7 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
) -> serror::Result<GetBuildersSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
Builder,
>(user)
>(&user)
.await?
{
Some(ids) => doc! {

View File

@@ -1,6 +1,6 @@
use std::{cmp, collections::HashSet};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::read::*,
entities::{
@@ -51,20 +51,12 @@ impl Resolve<ReadArgs> for ListDeployments {
} else {
get_all_tags(None).await?
};
let only_update_available = self.query.specific.update_available;
let deployments = resource::list_for_user::<Deployment>(
self.query, user, &all_tags,
Ok(
resource::list_for_user::<Deployment>(
self.query, user, &all_tags,
)
.await?,
)
.await?;
let deployments = if only_update_available {
deployments
.into_iter()
.filter(|deployment| deployment.info.update_available)
.collect()
} else {
deployments
};
Ok(deployments)
}
}
@@ -289,7 +281,7 @@ impl Resolve<ReadArgs> for ListCommonDeploymentExtraArgs {
get_all_tags(None).await?
};
let deployments = resource::list_full_for_user::<Deployment>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await
.context("failed to get resources matching query")?;

View File

@@ -1,11 +1,10 @@
use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::{Context, anyhow};
use axum::{Extension, Router, middleware, routing::post};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use komodo_client::{
api::read::*,
entities::{
ResourceTarget,
build::Build,
builder::{Builder, BuilderConfig},
config::{DockerRegistry, GitProvider},
@@ -13,6 +12,7 @@ use komodo_client::{
server::Server,
sync::ResourceSync,
user::User,
ResourceTarget,
},
};
use resolver_api::Resolve;
@@ -315,7 +315,7 @@ impl Resolve<ReadArgs> for ListSecrets {
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`").into(),
);
)
}
};
if let Some(id) = server_id {
@@ -373,7 +373,7 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`").into(),
);
)
}
}
}
@@ -381,17 +381,17 @@ impl Resolve<ReadArgs> for ListGitProvidersFromConfig {
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
user,
&user,
&[]
),
resource::list_full_for_user::<Repo>(
Default::default(),
user,
&user,
&[]
),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
user,
&user,
&[]
),
)?;
@@ -473,7 +473,7 @@ impl Resolve<ReadArgs> for ListDockerRegistriesFromConfig {
_ => {
return Err(
anyhow!("target must be `Server` or `Builder`").into(),
);
)
}
}
}

View File

@@ -1,4 +1,4 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::read::{
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,

View File

@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListFullProcedures {
};
Ok(
resource::list_full_for_user::<Procedure>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)

View File

@@ -1,6 +1,6 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::api::read::*;
use mongo_indexed::{Document, doc};
use mongo_indexed::{doc, Document};
use mungos::{
by_id::find_one_by_id, find::find_collect,
mongodb::options::FindOptions,

View File

@@ -45,7 +45,7 @@ impl Resolve<ReadArgs> for ListRepos {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Repo>(self.query, user, &all_tags)
resource::list_for_user::<Repo>(self.query, &user, &all_tags)
.await?,
)
}
@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListFullRepos {
};
Ok(
resource::list_full_for_user::<Repo>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)

View File

@@ -4,14 +4,13 @@ use std::{
sync::{Arc, OnceLock},
};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use async_timing_util::{
FIFTEEN_SECONDS_MS, get_timelength_in_ms, unix_timestamp_ms,
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
};
use komodo_client::{
api::read::*,
entities::{
ResourceTarget,
deployment::Deployment,
docker::{
container::{Container, ContainerListItem},
@@ -26,6 +25,7 @@ use komodo_client::{
stack::{Stack, StackServiceNames},
stats::{SystemInformation, SystemProcess},
update::Log,
ResourceTarget,
},
};
use mungos::{
@@ -128,7 +128,7 @@ impl Resolve<ReadArgs> for ListServers {
get_all_tags(None).await?
};
Ok(
resource::list_for_user::<Server>(self.query, user, &all_tags)
resource::list_for_user::<Server>(self.query, &user, &all_tags)
.await?,
)
}
@@ -146,7 +146,7 @@ impl Resolve<ReadArgs> for ListFullServers {
};
Ok(
resource::list_full_for_user::<Server>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)
@@ -387,7 +387,7 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
) -> serror::Result<ListAllDockerContainersResponse> {
let servers = resource::list_for_user::<Server>(
Default::default(),
user,
&user,
&[],
)
.await?
@@ -533,7 +533,7 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
user,
&user,
)
.await?;

View File

@@ -76,7 +76,7 @@ impl Resolve<ReadArgs> for GetServerTemplatesSummary {
) -> serror::Result<GetServerTemplatesSummaryResponse> {
let query = match resource::get_resource_object_ids_for_user::<
ServerTemplate,
>(user)
>(&user)
.await?
{
Some(ids) => doc! {

View File

@@ -133,7 +133,7 @@ impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -164,7 +164,7 @@ impl Resolve<ReadArgs> for ListCommonStackBuildExtraArgs {
get_all_tags(None).await?
};
let stacks = resource::list_full_for_user::<Stack>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
@@ -194,25 +194,10 @@ impl Resolve<ReadArgs> for ListStacks {
} else {
get_all_tags(None).await?
};
let only_update_available = self.query.specific.update_available;
let stacks =
Ok(
resource::list_for_user::<Stack>(self.query, user, &all_tags)
.await?;
let stacks = if only_update_available {
stacks
.into_iter()
.filter(|stack| {
stack
.info
.services
.iter()
.any(|service| service.update_available)
})
.collect()
} else {
stacks
};
Ok(stacks)
.await?,
)
}
}

View File

@@ -6,6 +6,7 @@ use komodo_client::{
permission::PermissionLevel,
sync::{
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
ResourceSyncState,
},
},
};
@@ -15,7 +16,7 @@ use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{action_states, github_client},
state::{action_states, github_client, resource_sync_state_cache},
};
use super::ReadArgs;
@@ -28,7 +29,7 @@ impl Resolve<ReadArgs> for GetResourceSync {
Ok(
resource::get_check_permissions::<ResourceSync>(
&self.sync,
user,
&user,
PermissionLevel::Read,
)
.await?,
@@ -48,7 +49,7 @@ impl Resolve<ReadArgs> for ListResourceSyncs {
};
Ok(
resource::list_for_user::<ResourceSync>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)
@@ -67,7 +68,7 @@ impl Resolve<ReadArgs> for ListFullResourceSyncs {
};
Ok(
resource::list_full_for_user::<ResourceSync>(
self.query, user, &all_tags,
self.query, &user, &all_tags,
)
.await?,
)
@@ -111,6 +112,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
let mut res = GetResourceSyncsSummaryResponse::default();
let cache = resource_sync_state_cache();
let action_states = action_states();
for resource_sync in resource_syncs {
@@ -129,18 +131,30 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
res.failed += 1;
continue;
}
if action_states
.resource_sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
.get()?
.syncing
{
res.syncing += 1;
continue;
match (
cache.get(&resource_sync.id).await.unwrap_or_default(),
action_states
.resource_sync
.get(&resource_sync.id)
.await
.unwrap_or_default()
.get()?,
) {
(_, action_states) if action_states.syncing => {
res.syncing += 1;
}
(ResourceSyncState::Ok, _) => res.ok += 1,
(ResourceSyncState::Failed, _) => res.failed += 1,
(ResourceSyncState::Unknown, _) => res.unknown += 1,
// will never come off the cache in the building state, since that comes from action states
(ResourceSyncState::Syncing, _) => {
unreachable!()
}
(ResourceSyncState::Pending, _) => {
unreachable!()
}
}
res.ok += 1;
}
Ok(res)

View File

@@ -6,12 +6,11 @@ use komodo_client::{
ListUserGroups,
},
entities::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment,
permission::PermissionLevel, procedure::Procedure, repo::Repo,
resource::ResourceQuery, server::Server,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, toml::ResourcesToml, user::User,
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, resource::ResourceQuery,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, toml::ResourcesToml, ResourceTarget,
},
};
use mungos::find::find_collect;
@@ -24,168 +23,156 @@ use crate::{
resource,
state::db_client,
sync::{
AllResourcesById,
toml::{TOML_PRETTY_OPTIONS, ToToml, convert_resource},
toml::{convert_resource, ToToml, TOML_PRETTY_OPTIONS},
user_groups::convert_user_groups,
AllResourcesById,
},
};
use super::ReadArgs;
async fn get_all_targets(
tags: &[String],
user: &User,
) -> anyhow::Result<Vec<ResourceTarget>> {
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Stack(resource.id)),
);
targets.extend(
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<Action>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags).build(),
user,
&all_tags,
)
.await?
.into_iter()
// These will already be filtered by [ExportResourcesToToml]
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
Ok(targets)
}
impl Resolve<ReadArgs> for ExportAllResourcesToToml {
async fn resolve(
self,
args: &ReadArgs,
) -> serror::Result<ExportAllResourcesToTomlResponse> {
let targets = if self.include_resources {
get_all_targets(&self.tags, &args.user).await?
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if self.tags.is_empty() {
vec![]
} else {
Vec::new()
get_all_tags(None).await?
};
let user_groups = if self.include_user_groups {
if args.user.admin {
find_collect(&db_client().user_groups, None, None)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|user_group| user_group.id)
.collect()
} else {
get_user_user_group_ids(&args.user.id).await?
}
let ReadArgs { user } = args;
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Alerter(resource.id)),
);
targets.extend(
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Builder(resource.id)),
);
targets.extend(
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Server(resource.id)),
);
targets.extend(
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Stack(resource.id)),
);
targets.extend(
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Deployment(resource.id)),
);
targets.extend(
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Build(resource.id)),
);
targets.extend(
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Repo(resource.id)),
);
targets.extend(
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<Action>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
);
targets.extend(
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(self.tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
// These will already be filtered by [ExportResourcesToToml]
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
);
let user_groups = if user.admin && self.tags.is_empty() {
find_collect(&db_client().user_groups, None, None)
.await
.context("failed to query db for user groups")?
.into_iter()
.map(|user_group| user_group.id)
.collect()
} else {
Vec::new()
get_user_user_group_ids(&user.id).await?
};
ExportResourcesToToml {
targets,
user_groups,
include_variables: self.include_variables,
include_variables: self.tags.is_empty(),
}
.resolve(args)
.await
@@ -211,7 +198,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Alerter(id) => {
let alerter = resource::get_check_permissions::<Alerter>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -225,7 +212,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::ResourceSync(id) => {
let sync = resource::get_check_permissions::<ResourceSync>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -244,7 +231,9 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::ServerTemplate(id) => {
let template = resource::get_check_permissions::<
ServerTemplate,
>(&id, user, PermissionLevel::Read)
>(
&id, &user, PermissionLevel::Read
)
.await?;
res.server_templates.push(
convert_resource::<ServerTemplate>(
@@ -258,7 +247,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Server(id) => {
let server = resource::get_check_permissions::<Server>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -273,7 +262,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
let mut builder =
resource::get_check_permissions::<Builder>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -288,7 +277,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Build(id) => {
let mut build = resource::get_check_permissions::<Build>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -304,7 +293,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
let mut deployment = resource::get_check_permissions::<
Deployment,
>(
&id, user, PermissionLevel::Read
&id, &user, PermissionLevel::Read
)
.await?;
Deployment::replace_ids(&mut deployment, &all);
@@ -318,7 +307,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Repo(id) => {
let mut repo = resource::get_check_permissions::<Repo>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -333,7 +322,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Stack(id) => {
let mut stack = resource::get_check_permissions::<Stack>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -349,7 +338,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
let mut procedure = resource::get_check_permissions::<
Procedure,
>(
&id, user, PermissionLevel::Read
&id, &user, PermissionLevel::Read
)
.await?;
Procedure::replace_ids(&mut procedure, &all);
@@ -363,7 +352,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
ResourceTarget::Action(id) => {
let mut action = resource::get_check_permissions::<Action>(
&id,
user,
&user,
PermissionLevel::Read,
)
.await?;

View File

@@ -1,10 +1,9 @@
use std::collections::HashMap;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
@@ -19,6 +18,7 @@ use komodo_client::{
sync::ResourceSync,
update::{Update, UpdateListItem},
user::User,
ResourceTarget,
},
};
use mungos::{
@@ -43,7 +43,7 @@ impl Resolve<ReadArgs> for ListUpdates {
self.query
} else {
let server_query =
resource::get_resource_ids_for_user::<Server>(user)
resource::get_resource_ids_for_user::<Server>(&user)
.await?
.map(|ids| {
doc! {
@@ -53,7 +53,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Server" });
let deployment_query =
resource::get_resource_ids_for_user::<Deployment>(user)
resource::get_resource_ids_for_user::<Deployment>(&user)
.await?
.map(|ids| {
doc! {
@@ -63,7 +63,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
let stack_query =
resource::get_resource_ids_for_user::<Stack>(user)
resource::get_resource_ids_for_user::<Stack>(&user)
.await?
.map(|ids| {
doc! {
@@ -73,7 +73,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Stack" });
let build_query =
resource::get_resource_ids_for_user::<Build>(user)
resource::get_resource_ids_for_user::<Build>(&user)
.await?
.map(|ids| {
doc! {
@@ -83,7 +83,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Build" });
let repo_query =
resource::get_resource_ids_for_user::<Repo>(user)
resource::get_resource_ids_for_user::<Repo>(&user)
.await?
.map(|ids| {
doc! {
@@ -93,7 +93,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Repo" });
let procedure_query =
resource::get_resource_ids_for_user::<Procedure>(user)
resource::get_resource_ids_for_user::<Procedure>(&user)
.await?
.map(|ids| {
doc! {
@@ -103,7 +103,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query =
resource::get_resource_ids_for_user::<Action>(user)
resource::get_resource_ids_for_user::<Action>(&user)
.await?
.map(|ids| {
doc! {
@@ -113,7 +113,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query =
resource::get_resource_ids_for_user::<Builder>(user)
resource::get_resource_ids_for_user::<Builder>(&user)
.await?
.map(|ids| {
doc! {
@@ -123,7 +123,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Builder" });
let alerter_query =
resource::get_resource_ids_for_user::<Alerter>(user)
resource::get_resource_ids_for_user::<Alerter>(&user)
.await?
.map(|ids| {
doc! {
@@ -133,7 +133,7 @@ impl Resolve<ReadArgs> for ListUpdates {
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query =
resource::get_resource_ids_for_user::<ServerTemplate>(user)
resource::get_resource_ids_for_user::<ServerTemplate>(&user)
.await?
.map(|ids| {
doc! {
@@ -144,7 +144,7 @@ impl Resolve<ReadArgs> for ListUpdates {
let resource_sync_query =
resource::get_resource_ids_for_user::<ResourceSync>(
user,
&user,
)
.await?
.map(|ids| {
@@ -242,12 +242,12 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::System(_) => {
return Err(
anyhow!("user must be admin to view system updates").into(),
);
)
}
ResourceTarget::Server(id) => {
resource::get_check_permissions::<Server>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -255,7 +255,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Deployment(id) => {
resource::get_check_permissions::<Deployment>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -263,7 +263,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Build(id) => {
resource::get_check_permissions::<Build>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -271,7 +271,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Repo(id) => {
resource::get_check_permissions::<Repo>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -279,7 +279,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Builder(id) => {
resource::get_check_permissions::<Builder>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -287,7 +287,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Alerter(id) => {
resource::get_check_permissions::<Alerter>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -295,7 +295,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Procedure(id) => {
resource::get_check_permissions::<Procedure>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -303,7 +303,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Action(id) => {
resource::get_check_permissions::<Action>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -311,7 +311,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::ServerTemplate(id) => {
resource::get_check_permissions::<ServerTemplate>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -319,7 +319,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::ResourceSync(id) => {
resource::get_check_permissions::<ResourceSync>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;
@@ -327,7 +327,7 @@ impl Resolve<ReadArgs> for GetUpdate {
ResourceTarget::Stack(id) => {
resource::get_check_permissions::<Stack>(
id,
user,
&user,
PermissionLevel::Read,
)
.await?;

View File

@@ -1,4 +1,4 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::read::{
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
@@ -6,7 +6,7 @@ use komodo_client::{
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{UserConfig, admin_service_user},
entities::user::{admin_service_user, UserConfig},
};
use mungos::{
by_id::find_one_by_id,

View File

@@ -5,7 +5,7 @@ use komodo_client::api::read::*;
use mungos::{
find::find_collect,
mongodb::{
bson::{Document, doc, oid::ObjectId},
bson::{doc, oid::ObjectId, Document},
options::FindOptions,
},
};

View File

@@ -1,7 +1,7 @@
use std::{collections::VecDeque, time::Instant};
use anyhow::{Context, anyhow};
use axum::{Extension, Json, Router, middleware, routing::post};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Json, Router};
use derive_variants::EnumVariants;
use komodo_client::{
api::user::*,

View File

@@ -32,12 +32,12 @@ impl Resolve<WriteArgs> for CopyAction {
let Action { config, .. } =
resource::get_check_permissions::<Action>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Action>(&self.name, config.into(), user)
resource::create::<Action>(&self.name, config.into(), &user)
.await?,
)
}

View File

@@ -32,7 +32,7 @@ impl Resolve<WriteArgs> for CopyAlerter {
let Alerter { config, .. } =
resource::get_check_permissions::<Alerter>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;

View File

@@ -1,13 +1,13 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, NoData,
build::{Build, BuildInfo, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
update::Update,
CloneArgs, NoData,
},
};
use mongo_indexed::doc;
@@ -308,7 +308,7 @@ impl Resolve<WriteArgs> for DeleteBuildWebhook {
let build = resource::get_check_permissions::<Build>(
&self.build,
user,
&user,
PermissionLevel::Write,
)
.await?;

View File

@@ -37,7 +37,7 @@ impl Resolve<WriteArgs> for CopyBuilder {
)
.await?;
Ok(
resource::create::<Builder>(&self.name, config.into(), user)
resource::create::<Builder>(&self.name, config.into(), &user)
.await?,
)
}
@@ -72,6 +72,9 @@ impl Resolve<WriteArgs> for RenameBuilder {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Update> {
Ok(resource::rename::<Builder>(&self.id, &self.name, user).await?)
Ok(
resource::rename::<Builder>(&self.id, &self.name, &user)
.await?,
)
}
}

View File

@@ -1,8 +1,7 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::*,
entities::{
Operation,
deployment::{
Deployment, DeploymentImage, DeploymentState,
PartialDeploymentConfig, RestartMode,
@@ -13,6 +12,7 @@ use komodo_client::{
server::{Server, ServerState},
to_komodo_name,
update::Update,
Operation,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
@@ -58,8 +58,12 @@ impl Resolve<WriteArgs> for CopyDeployment {
)
.await?;
Ok(
resource::create::<Deployment>(&self.name, config.into(), user)
.await?,
resource::create::<Deployment>(
&self.name,
config.into(),
&user,
)
.await?,
)
}
}
@@ -153,7 +157,7 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
}
Ok(
resource::create::<Deployment>(&self.name, config, user)
resource::create::<Deployment>(&self.name, config, &user)
.await?,
)
}
@@ -176,7 +180,7 @@ impl Resolve<WriteArgs> for UpdateDeployment {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Deployment> {
Ok(
resource::update::<Deployment>(&self.id, self.config, user)
resource::update::<Deployment>(&self.id, self.config, &user)
.await?,
)
}
@@ -220,7 +224,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
}
let mut update =
make_update(&deployment, Operation::RenameDeployment, user);
make_update(&deployment, Operation::RenameDeployment, &user);
update_one_by_id(
&db_client().deployments,

View File

@@ -2,10 +2,10 @@ use anyhow::anyhow;
use komodo_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
ResourceTarget, action::Action, alerter::Alerter, build::Build,
builder::Builder, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, server_template::ServerTemplate,
stack::Stack, sync::ResourceSync,
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, ResourceTarget,
},
};
use resolver_api::Resolve;
@@ -27,13 +27,13 @@ impl Resolve<WriteArgs> for UpdateDescription {
"cannot update description of System resource target"
)
.into(),
);
)
}
ResourceTarget::Server(id) => {
resource::update_description::<Server>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -41,7 +41,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Deployment>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -49,7 +49,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Build>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -57,7 +57,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Repo>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -65,7 +65,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Builder>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -73,7 +73,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Alerter>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -81,7 +81,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Procedure>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -89,7 +89,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Action>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -97,7 +97,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<ServerTemplate>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -105,7 +105,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<ResourceSync>(
&id,
&self.description,
user,
&user,
)
.await?;
}
@@ -113,7 +113,7 @@ impl Resolve<WriteArgs> for UpdateDescription {
resource::update_description::<Stack>(
&id,
&self.description,
user,
&user,
)
.await?;
}

View File

@@ -1,7 +1,7 @@
use std::time::Instant;
use anyhow::Context;
use axum::{Extension, Router, middleware, routing::post};
use axum::{middleware, routing::post, Extension, Router};
use derive_variants::{EnumVariants, ExtractVariant};
use komodo_client::{api::write::*, entities::user::User};
use resolver_api::Resolve;
@@ -172,7 +172,6 @@ pub enum WriteRequest {
CreateTag(CreateTag),
DeleteTag(DeleteTag),
RenameTag(RenameTag),
UpdateTagColor(UpdateTagColor),
UpdateTagsOnResource(UpdateTagsOnResource),
// ==== VARIABLE ====

View File

@@ -1,17 +1,17 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::*,
entities::{
ResourceTarget, ResourceTargetVariant,
permission::{UserTarget, UserTargetVariant},
ResourceTarget, ResourceTargetVariant,
},
};
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
mongodb::{
bson::{Document, doc, oid::ObjectId},
bson::{doc, oid::ObjectId, Document},
options::UpdateOptions,
},
};

View File

@@ -32,7 +32,7 @@ impl Resolve<WriteArgs> for CopyProcedure {
let Procedure { config, .. } =
resource::get_check_permissions::<Procedure>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;

View File

@@ -1,9 +1,9 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::*,
entities::{
Operation, ResourceTarget,
provider::{DockerRegistryAccount, GitProviderAccount},
Operation, ResourceTarget,
},
};
use mungos::{
@@ -44,7 +44,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateGitProviderAccount,
user,
&user,
);
account.id = db_client()
@@ -114,7 +114,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
user,
&user,
);
let account = to_document(&self.account).context(
@@ -173,7 +173,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateGitProviderAccount,
user,
&user,
);
let db = db_client();
@@ -235,7 +235,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateDockerRegistryAccount,
user,
&user,
);
account.id = db_client()
@@ -298,8 +298,8 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
if username.is_empty() {
return Err(
anyhow!(
"cannot update docker registry account with empty username"
)
"cannot update docker registry account with empty username"
)
.into(),
);
}
@@ -310,7 +310,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
user,
&user,
);
let account = to_document(&self.account).context(
@@ -373,7 +373,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateDockerRegistryAccount,
user,
&user,
);
let db = db_client();

View File

@@ -1,10 +1,9 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
CloneArgs, NoData, Operation,
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
@@ -12,6 +11,7 @@ use komodo_client::{
server::Server,
to_komodo_name,
update::{Log, Update},
CloneArgs, NoData, Operation,
},
};
use mongo_indexed::doc;
@@ -53,12 +53,12 @@ impl Resolve<WriteArgs> for CopyRepo {
let Repo { config, .. } =
resource::get_check_permissions::<Repo>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;
Ok(
resource::create::<Repo>(&self.name, config.into(), user)
resource::create::<Repo>(&self.name, config.into(), &user)
.await?,
)
}
@@ -89,7 +89,7 @@ impl Resolve<WriteArgs> for RenameRepo {
) -> serror::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;
@@ -98,7 +98,7 @@ impl Resolve<WriteArgs> for RenameRepo {
|| !repo.config.path.is_empty()
{
return Ok(
resource::rename::<Repo>(&repo.id, &self.name, user).await?,
resource::rename::<Repo>(&repo.id, &self.name, &user).await?,
);
}
@@ -113,7 +113,7 @@ impl Resolve<WriteArgs> for RenameRepo {
let name = to_komodo_name(&self.name);
let mut update = make_update(&repo, Operation::RenameRepo, user);
let mut update = make_update(&repo, Operation::RenameRepo, &user);
update_one_by_id(
&db_client().repos,
@@ -171,7 +171,7 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
// repo should be able to do this.
let repo = resource::get_check_permissions::<Repo>(
&self.repo,
user,
&user,
PermissionLevel::Execute,
)
.await?;

View File

@@ -2,10 +2,10 @@ use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
Operation,
permission::PermissionLevel,
server::Server,
update::{Update, UpdateStatus},
Operation,
},
};
use periphery_client::api;
@@ -77,7 +77,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::CreateNetwork, user);
make_update(&server, Operation::CreateNetwork, &user);
update.status = UpdateStatus::InProgress;
update.id = add_update(update.clone()).await?;

View File

@@ -24,7 +24,7 @@ impl Resolve<WriteArgs> for CreateServerTemplate {
resource::create::<ServerTemplate>(
&self.name,
self.config,
user,
&user,
)
.await?,
)
@@ -40,7 +40,7 @@ impl Resolve<WriteArgs> for CopyServerTemplate {
let ServerTemplate { config, .. } =
resource::get_check_permissions::<ServerTemplate>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;
@@ -48,7 +48,7 @@ impl Resolve<WriteArgs> for CopyServerTemplate {
resource::create::<ServerTemplate>(
&self.name,
config.into(),
user,
&user,
)
.await?,
)

View File

@@ -1,6 +1,6 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::{user::CreateApiKey, write::*},
entities::{

View File

@@ -1,15 +1,15 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
FileContents, NoData, Operation,
config::core::CoreConfig,
permission::PermissionLevel,
server::ServerState,
stack::{PartialStackConfig, Stack, StackInfo},
update::Update,
user::stack_user,
FileContents, NoData, Operation,
},
};
use mungos::mongodb::bson::{doc, to_document};
@@ -33,7 +33,7 @@ use crate::{
resource,
stack::{
get_stack_and_server,
remote::{RemoteComposeContents, get_repo_compose_contents},
remote::{get_repo_compose_contents, RemoteComposeContents},
services::extract_services_into_res,
},
state::{db_client, github_client},
@@ -114,7 +114,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
} = self;
let (mut stack, server) = get_stack_and_server(
&stack,
user,
&user,
PermissionLevel::Write,
true,
)
@@ -127,7 +127,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
}
let mut update =
make_update(&stack, Operation::WriteStackContents, user);
make_update(&stack, Operation::WriteStackContents, &user);
update.push_simple_log("File contents to write", &contents);
@@ -402,7 +402,7 @@ impl Resolve<WriteArgs> for RefreshStackCache {
if state == ServerState::Ok {
let name = stack.name.clone();
if let Err(e) =
pull_stack_inner(stack, Vec::new(), &server, None).await
pull_stack_inner(stack, None, &server, None).await
{
warn!(
"Failed to pull latest images for Stack {name} | {e:#}",

View File

@@ -1,11 +1,11 @@
use std::{collections::HashMap, path::PathBuf};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::{read::ExportAllResourcesToToml, write::*},
entities::{
self, CloneArgs, NoData, Operation, ResourceTarget,
self,
action::Action,
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
@@ -23,11 +23,11 @@ use komodo_client::{
stack::Stack,
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
SyncDeployUpdate,
},
to_komodo_name,
update::{Log, Update},
user::sync_user,
CloneArgs, NoData, Operation, ResourceTarget,
},
};
use mungos::{
@@ -49,11 +49,11 @@ use crate::{
query::get_id_to_tags,
update::{add_update, make_update, update_update},
},
resource,
resource::{self, refresh_resource_sync_state_cache},
state::{db_client, github_client},
sync::{
AllResourcesById, deploy::SyncDeployParams,
remote::RemoteResources, view::push_updates_for_view,
deploy::SyncDeployParams, remote::RemoteResources,
view::push_updates_for_view, AllResourcesById,
},
};
@@ -66,8 +66,12 @@ impl Resolve<WriteArgs> for CreateResourceSync {
WriteArgs { user }: &WriteArgs,
) -> serror::Result<ResourceSync> {
Ok(
resource::create::<ResourceSync>(&self.name, self.config, user)
.await?,
resource::create::<ResourceSync>(
&self.name,
self.config,
&user,
)
.await?,
)
}
}
@@ -81,7 +85,7 @@ impl Resolve<WriteArgs> for CopyResourceSync {
let ResourceSync { config, .. } =
resource::get_check_permissions::<ResourceSync>(
&self.id,
user,
&user,
PermissionLevel::Write,
)
.await?;
@@ -156,18 +160,12 @@ async fn write_sync_file_contents_on_host(
let full_path = root.join(&resource_path).join(&file_path);
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize resource file parent directory {parent:?}"
)
})?;
let _ = fs::create_dir_all(parent).await;
}
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
format!(
"Failed to write resource file contents to {full_path:?}"
)
format!("Failed to write file contents to {full_path:?}")
})
{
update.push_error_log("Write File", format_serror(&e.into()));
@@ -225,11 +223,7 @@ async fn write_sync_file_contents_git(
let full_path = root.join(&resource_path).join(&file_path);
if let Some(parent) = full_path.parent() {
fs::create_dir_all(parent).await.with_context(|| {
format!(
"Failed to initialize resource file parent directory {parent:?}"
)
})?;
let _ = fs::create_dir_all(parent).await;
}
// Ensure the folder is initialized as git repo.
@@ -263,9 +257,7 @@ async fn write_sync_file_contents_git(
if let Err(e) =
fs::write(&full_path, &contents).await.with_context(|| {
format!(
"Failed to write resource file contents to {full_path:?}"
)
format!("Failed to write file contents to {full_path:?}")
})
{
update.push_error_log("Write File", format_serror(&e.into()));
@@ -347,7 +339,7 @@ impl Resolve<WriteArgs> for CommitSync {
let sync = resource::get_check_permissions::<
entities::sync::ResourceSync,
>(&self.sync, user, PermissionLevel::Write)
>(&self.sync, &user, PermissionLevel::Write)
.await?;
let file_contents_empty = sync.config.file_contents_empty();
@@ -389,17 +381,14 @@ impl Resolve<WriteArgs> for CommitSync {
};
let res = ExportAllResourcesToToml {
include_resources: sync.config.include_resources,
tags: sync.config.match_tags.clone(),
include_variables: sync.config.include_variables,
include_user_groups: sync.config.include_user_groups,
}
.resolve(&ReadArgs {
user: sync_user().to_owned(),
})
.await?;
let mut update = make_update(&sync, Operation::CommitSync, user);
let mut update = make_update(&sync, Operation::CommitSync, &user);
update.id = add_update(update.clone()).await?;
update.logs.push(Log::simple("Resources", res.toml.clone()));
@@ -414,9 +403,7 @@ impl Resolve<WriteArgs> for CommitSync {
.join(to_komodo_name(&sync.name))
.join(&resource_path);
if let Some(parent) = file_path.parent() {
fs::create_dir_all(parent)
.await
.with_context(|| format!("Failed to initialize resource file parent directory {parent:?}"))?;
let _ = tokio::fs::create_dir_all(&parent).await;
};
if let Err(e) = tokio::fs::write(&file_path, &res.toml)
.await
@@ -496,6 +483,21 @@ impl Resolve<WriteArgs> for CommitSync {
};
update.finalize();
// Need to manually update the update before cache refresh,
// and before broadcast with add_update.
// The Err case of to_document should be unreachable,
// but will fail to update cache in that case.
if let Ok(update_doc) = to_document(&update) {
let _ = update_one_by_id(
&db_client().updates,
&update.id,
mungos::update::Update::Set(update_doc),
None,
)
.await;
refresh_resource_sync_state_cache().await;
}
update_update(update.clone()).await?;
Ok(update)
@@ -546,174 +548,171 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
sync.info.pending_message = message;
if !sync.info.remote_errors.is_empty() {
return Err(anyhow!(
"Remote resources have errors. Cannot compute diffs."
));
return Err(
anyhow!(
"Remote resources have errors. Cannot compute diffs."
)
.into(),
);
}
let resources = resources?;
let delete = sync.config.managed || sync.config.delete;
let id_to_tags = get_id_to_tags(None).await?;
let all_resources = AllResourcesById::load().await?;
let (resource_updates, deploy_updates) =
if sync.config.include_resources {
let id_to_tags = get_id_to_tags(None).await?;
let deployments_by_name = all_resources
.deployments
.values()
.map(|deployment| {
(deployment.name.clone(), deployment.clone())
})
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
let deployments_by_name = all_resources
.deployments
.values()
.map(|deployment| {
(deployment.name.clone(), deployment.clone())
})
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
let deploy_updates =
crate::sync::deploy::get_updates_for_view(SyncDeployParams {
deployments: &resources.deployments,
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
})
.await;
let deploy_updates =
crate::sync::deploy::get_updates_for_view(
SyncDeployParams {
deployments: &resources.deployments,
deployment_map: &deployments_by_name,
stacks: &resources.stacks,
stack_map: &stacks_by_name,
all_resources: &all_resources,
},
)
.await;
let delete = sync.config.managed || sync.config.delete;
let mut diffs = Vec::new();
let mut diffs = Vec::new();
push_updates_for_view::<Server>(
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Stack>(
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Deployment>(
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Build>(
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Repo>(
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Procedure>(
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Action>(
resources.actions,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Builder>(
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Alerter>(
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
{
push_updates_for_view::<Server>(
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Stack>(
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Deployment>(
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Build>(
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Repo>(
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Procedure>(
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Action>(
resources.actions,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Builder>(
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Alerter>(
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ServerTemplate>(
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<ResourceSync>(
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
}
(diffs, deploy_updates)
} else {
(Vec::new(), SyncDeployUpdate::default())
};
let variable_updates = if sync.config.include_variables {
let variable_updates = if sync.config.match_tags.is_empty() {
crate::sync::variables::get_updates_for_view(
&resources.variables,
delete,
@@ -723,7 +722,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
Default::default()
};
let user_group_updates = if sync.config.include_user_groups {
let user_group_updates = if sync.config.match_tags.is_empty() {
crate::sync::user_groups::get_updates_for_view(
resources.user_groups,
delete,
@@ -735,7 +734,7 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
};
anyhow::Ok((
resource_updates,
diffs,
deploy_updates,
variable_updates,
user_group_updates,

View File

@@ -1,26 +1,17 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::{
CreateTag, DeleteTag, RenameTag, UpdateTagColor,
UpdateTagsOnResource, UpdateTagsOnResourceResponse,
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
UpdateTagsOnResourceResponse,
},
entities::{
ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
tag::{Tag, TagColor},
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, ResourceTarget,
},
};
use mungos::{
@@ -50,7 +41,6 @@ impl Resolve<WriteArgs> for CreateTag {
let mut tag = Tag {
id: Default::default(),
name: self.name,
color: TagColor::Slate,
owner: user.id.clone(),
};
@@ -78,7 +68,7 @@ impl Resolve<WriteArgs> for RenameTag {
return Err(anyhow!("tag name cannot be ObjectId").into());
}
get_tag_check_owner(&self.id, user).await?;
get_tag_check_owner(&self.id, &user).await?;
update_one_by_id(
&db_client().tags,
@@ -93,34 +83,13 @@ impl Resolve<WriteArgs> for RenameTag {
}
}
impl Resolve<WriteArgs> for UpdateTagColor {
#[instrument(name = "UpdateTagColor", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Tag> {
let tag = get_tag_check_owner(&self.tag, user).await?;
update_one_by_id(
&db_client().tags,
&tag.id,
doc! { "$set": { "color": self.color.as_ref() } },
None,
)
.await
.context("failed to rename tag on db")?;
Ok(get_tag(&self.tag).await?)
}
}
impl Resolve<WriteArgs> for DeleteTag {
#[instrument(name = "DeleteTag", skip(user))]
async fn resolve(
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<Tag> {
let tag = get_tag_check_owner(&self.id, user).await?;
let tag = get_tag_check_owner(&self.id, &user).await?;
tokio::try_join!(
resource::remove_tag_from_all::<Server>(&self.id),
@@ -149,7 +118,7 @@ impl Resolve<WriteArgs> for UpdateTagsOnResource {
let WriteArgs { user } = args;
match self.target {
ResourceTarget::System(_) => {
return Err(anyhow!("Invalid target type: System").into());
return Err(anyhow!("Invalid target type: System").into())
}
ResourceTarget::Build(id) => {
resource::get_check_permissions::<Build>(

View File

@@ -1,20 +1,18 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::{
DeleteUser, DeleteUserResponse, UpdateUserPassword,
UpdateUserPasswordResponse, UpdateUserUsername,
UpdateUserUsernameResponse,
},
entities::{NoData, user::UserConfig},
entities::{user::UserConfig, NoData},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
config::core_config, helpers::hash_password, state::db_client,
};
use crate::{helpers::hash_password, state::db_client};
use super::WriteArgs;
@@ -25,16 +23,6 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateUserUsernameResponse> {
for locked_username in &core_config().lock_login_credentials_for {
if locked_username == "__ALL__"
|| *locked_username == user.username
{
return Err(
anyhow!("User not allowed to update their username.")
.into(),
);
}
}
if self.username.is_empty() {
return Err(anyhow!("Username cannot be empty.").into());
}
@@ -68,16 +56,6 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
self,
WriteArgs { user }: &WriteArgs,
) -> serror::Result<UpdateUserPasswordResponse> {
for locked_username in &core_config().lock_login_credentials_for {
if locked_username == "__ALL__"
|| *locked_username == user.username
{
return Err(
anyhow!("User not allowed to update their password.")
.into(),
);
}
}
let UserConfig::Local { .. } = user.config else {
return Err(anyhow!("User is not local user").into());
};

View File

@@ -1,6 +1,6 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::{
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,

View File

@@ -1,7 +1,7 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::*,
entities::{Operation, ResourceTarget, variable::Variable},
entities::{variable::Variable, Operation, ResourceTarget},
};
use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
@@ -49,7 +49,7 @@ impl Resolve<WriteArgs> for CreateVariable {
let mut update = make_update(
ResourceTarget::system(),
Operation::CreateVariable,
user,
&user,
);
update
@@ -92,7 +92,7 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
let mut update = make_update(
ResourceTarget::system(),
Operation::UpdateVariableValue,
user,
&user,
);
let log = if variable.is_secret {

View File

@@ -1,11 +1,11 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio::sync::Mutex;
use crate::{
@@ -47,21 +47,15 @@ impl GithubOauthClient {
return None;
}
if host.is_empty() {
warn!(
"github oauth is enabled, but 'config.host' is not configured"
);
warn!("github oauth is enabled, but 'config.host' is not configured");
return None;
}
if id.is_empty() {
warn!(
"github oauth is enabled, but 'config.github_oauth.id' is not configured"
);
warn!("github oauth is enabled, but 'config.github_oauth.id' is not configured");
return None;
}
if secret.is_empty() {
warn!(
"github oauth is enabled, but 'config.github_oauth.secret' is not configured"
);
warn!("github oauth is enabled, but 'config.github_oauth.secret' is not configured");
return None;
}
GithubOauthClient {

View File

@@ -1,6 +1,6 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use axum::{
Router, extract::Query, response::Redirect, routing::get,
extract::Query, response::Redirect, routing::get, Router,
};
use komodo_client::entities::{
komodo_timestamp,
@@ -72,7 +72,7 @@ async fn callback(
.context("failed at find user query from database")?;
let jwt = match user {
Some(user) => jwt_client()
.encode(user.id)
.generate(user.id)
.context("failed to generate jwt")?,
None => {
let ts = komodo_timestamp();
@@ -109,7 +109,7 @@ async fn callback(
.context("inserted_id is not ObjectId")?
.to_string();
jwt_client()
.encode(user_id)
.generate(user_id)
.context("failed to generate jwt")?
}
};

View File

@@ -1,12 +1,13 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use jsonwebtoken::{DecodingKey, Validation, decode};
use anyhow::{anyhow, Context};
use jwt::Token;
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
use serde::{Deserialize, de::DeserializeOwned};
use serde::{de::DeserializeOwned, Deserialize};
use serde_json::Value;
use tokio::sync::Mutex;
use crate::{
@@ -48,21 +49,15 @@ impl GoogleOauthClient {
return None;
}
if host.is_empty() {
warn!(
"google oauth is enabled, but 'config.host' is not configured"
);
warn!("google oauth is enabled, but 'config.host' is not configured");
return None;
}
if id.is_empty() {
warn!(
"google oauth is enabled, but 'config.google_oauth.id' is not configured"
);
warn!("google oauth is enabled, but 'config.google_oauth.id' is not configured");
return None;
}
if secret.is_empty() {
warn!(
"google oauth is enabled, but 'config.google_oauth.secret' is not configured"
);
warn!("google oauth is enabled, but 'config.google_oauth.secret' is not configured");
return None;
}
let scopes = urlencoding::encode(
@@ -144,16 +139,10 @@ impl GoogleOauthClient {
&self,
id_token: &str,
) -> anyhow::Result<GoogleUser> {
let mut v = Validation::new(Default::default());
v.insecure_disable_signature_validation();
v.validate_aud = false;
let res = decode::<GoogleUser>(
id_token,
&DecodingKey::from_secret(b""),
&v,
)
.context("failed to decode google id token")?;
Ok(res.claims)
let t: Token<Value, GoogleUser, jwt::Unverified> =
Token::parse_unverified(id_token)
.context("failed to parse id_token")?;
Ok(t.claims().to_owned())
}
#[instrument(level = "debug", skip(self))]

View File

@@ -1,7 +1,7 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{
Router, extract::Query, response::Redirect, routing::get,
extract::Query, response::Redirect, routing::get, Router,
};
use komodo_client::entities::user::{User, UserConfig};
use mongo_indexed::Document;
@@ -81,7 +81,7 @@ async fn callback(
.context("failed at find user query from mongo")?;
let jwt = match user {
Some(user) => jwt_client()
.encode(user.id)
.generate(user.id)
.context("failed to generate jwt")?,
None => {
let ts = unix_timestamp_ms() as i64;
@@ -124,7 +124,7 @@ async fn callback(
.context("inserted_id is not ObjectId")?
.to_string();
jwt_client()
.encode(user_id)
.generate(user_id)
.context("failed to generate jwt")?
}
};

View File

@@ -1,15 +1,15 @@
use std::collections::HashMap;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use async_timing_util::{
Timelength, get_timelength_in_ms, unix_timestamp_ms,
};
use jsonwebtoken::{
DecodingKey, EncodingKey, Header, Validation, decode, encode,
get_timelength_in_ms, unix_timestamp_ms, Timelength,
};
use hmac::{Hmac, Mac};
use jwt::SignWithKey;
use komodo_client::entities::config::core::CoreConfig;
use mungos::mongodb::bson::doc;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
use crate::helpers::random_string;
@@ -24,10 +24,7 @@ pub struct JwtClaims {
}
pub struct JwtClient {
header: Header,
validation: Validation,
encoding_key: EncodingKey,
decoding_key: DecodingKey,
pub key: Hmac<Sha256>,
ttl_ms: u128,
exchange_tokens: ExchangeTokenMap,
}
@@ -39,11 +36,10 @@ impl JwtClient {
} else {
config.jwt_secret.clone()
};
let key = Hmac::new_from_slice(secret.as_bytes())
.context("failed at taking HmacSha256 of jwt secret")?;
Ok(JwtClient {
header: Header::default(),
validation: Validation::new(Default::default()),
encoding_key: EncodingKey::from_secret(secret.as_bytes()),
decoding_key: DecodingKey::from_secret(secret.as_bytes()),
key,
ttl_ms: get_timelength_in_ms(
config.jwt_ttl.to_string().parse()?,
),
@@ -51,7 +47,7 @@ impl JwtClient {
})
}
pub fn encode(&self, user_id: String) -> anyhow::Result<String> {
pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
let iat = unix_timestamp_ms();
let exp = iat + self.ttl_ms;
let claims = JwtClaims {
@@ -59,14 +55,10 @@ impl JwtClient {
iat,
exp,
};
encode(&self.header, &claims, &self.encoding_key)
.context("failed at signing claim")
}
pub fn decode(&self, jwt: &str) -> anyhow::Result<JwtClaims> {
decode::<JwtClaims>(jwt, &self.decoding_key, &self.validation)
.map(|res| res.claims)
.context("failed to decode token claims")
let jwt = claims
.sign_with_key(&self.key)
.context("failed at signing claim")?;
Ok(jwt)
}
#[instrument(level = "debug", skip_all)]

View File

@@ -1,6 +1,6 @@
use std::str::FromStr;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use komodo_client::{
api::auth::{
@@ -85,7 +85,7 @@ impl Resolve<AuthArgs> for CreateLocalUser {
.to_string();
let jwt = jwt_client()
.encode(user_id)
.generate(user_id)
.context("failed to generate jwt for user")?;
Ok(CreateLocalUserResponse { jwt })
@@ -131,7 +131,7 @@ impl Resolve<AuthArgs> for LoginLocalUser {
}
let jwt = jwt_client()
.encode(user.id)
.generate(user.id)
.context("failed at generating jwt for user")?;
Ok(LoginLocalUserResponse { jwt })

View File

@@ -1,4 +1,5 @@
use anyhow::{Context, anyhow};
use ::jwt::VerifyWithKey;
use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::Request, http::HeaderMap, middleware::Next,
@@ -70,9 +71,7 @@ pub async fn get_user_id_from_headers(
}
_ => {
// AUTH FAIL
Err(anyhow!(
"must attach either AUTHORIZATION header with jwt OR pass X-API-KEY and X-API-SECRET"
))
Err(anyhow!("must attach either AUTHORIZATION header with jwt OR pass X-API-KEY and X-API-SECRET"))
}
}
}
@@ -94,7 +93,9 @@ pub async fn authenticate_check_enabled(
pub async fn auth_jwt_get_user_id(
jwt: &str,
) -> anyhow::Result<String> {
let claims: JwtClaims = jwt_client().decode(jwt)?;
let claims: JwtClaims = jwt
.verify_with_key(&jwt_client().key)
.context("failed to verify claims")?;
if claims.exp > unix_timestamp_ms() {
Ok(claims.id)
} else {

View File

@@ -1,94 +1,67 @@
use std::{sync::OnceLock, time::Duration};
use std::sync::OnceLock;
use anyhow::Context;
use arc_swap::ArcSwapOption;
use openidconnect::{
Client, ClientId, ClientSecret, EmptyAdditionalClaims,
EndpointMaybeSet, EndpointNotSet, EndpointSet, IssuerUrl,
RedirectUrl, StandardErrorResponse, core::*,
core::{CoreClient, CoreProviderMetadata},
reqwest::async_http_client,
ClientId, ClientSecret, IssuerUrl, RedirectUrl,
};
use crate::config::core_config;
type OidcClient = Client<
EmptyAdditionalClaims,
CoreAuthDisplay,
CoreGenderClaim,
CoreJweContentEncryptionAlgorithm,
CoreJsonWebKey,
CoreAuthPrompt,
StandardErrorResponse<CoreErrorResponseType>,
CoreTokenResponse,
CoreTokenIntrospectionResponse,
CoreRevocableToken,
CoreRevocationErrorResponse,
EndpointSet,
EndpointNotSet,
EndpointNotSet,
EndpointNotSet,
EndpointMaybeSet,
EndpointMaybeSet,
>;
static DEFAULT_OIDC_CLIENT: OnceLock<Option<CoreClient>> =
OnceLock::new();
pub fn oidc_client() -> &'static ArcSwapOption<OidcClient> {
static OIDC_CLIENT: OnceLock<ArcSwapOption<OidcClient>> =
OnceLock::new();
OIDC_CLIENT.get_or_init(Default::default)
pub fn default_oidc_client() -> Option<&'static CoreClient> {
DEFAULT_OIDC_CLIENT
.get()
.expect("OIDC client get before init")
.as_ref()
}
/// The OIDC client must be reinitialized to
/// pick up the latest provider JWKs. This
/// function spawns a management thread to do this
/// on a loop.
pub async fn spawn_oidc_client_management() {
pub async fn init_default_oidc_client() {
let config = core_config();
if !config.oidc_enabled
|| config.oidc_provider.is_empty()
|| config.oidc_client_id.is_empty()
|| config.oidc_client_secret.is_empty()
{
DEFAULT_OIDC_CLIENT
.set(None)
.expect("Default OIDC client initialized twice");
return;
}
reset_oidc_client()
async {
// Use OpenID Connect Discovery to fetch the provider metadata.
let provider_metadata = CoreProviderMetadata::discover_async(
IssuerUrl::new(config.oidc_provider.clone())?,
async_http_client,
)
.await
.context("Failed to initialize OIDC client.")
.unwrap();
tokio::spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(60)).await;
if let Err(e) = reset_oidc_client().await {
warn!("Failed to reinitialize OIDC client | {e:#}");
}
}
});
}
.context(
"Failed to get OIDC /.well-known/openid-configuration",
)?;
async fn reset_oidc_client() -> anyhow::Result<()> {
let config = core_config();
// Use OpenID Connect Discovery to fetch the provider metadata.
let provider_metadata = CoreProviderMetadata::discover_async(
IssuerUrl::new(config.oidc_provider.clone())?,
super::reqwest_client(),
)
// Create an OpenID Connect client by specifying the client ID, client secret, authorization URL
// and token URL.
let client = CoreClient::from_provider_metadata(
provider_metadata,
ClientId::new(config.oidc_client_id.to_string()),
Some(ClientSecret::new(config.oidc_client_secret.to_string())),
)
// Set the URL the user will be redirected to after the authorization process.
.set_redirect_uri(RedirectUrl::new(format!(
"{}/auth/oidc/callback",
core_config().host
))?);
DEFAULT_OIDC_CLIENT
.set(Some(client))
.expect("Default OIDC client initialized twice");
anyhow::Ok(())
}
.await
.context("Failed to get OIDC /.well-known/openid-configuration")?;
let client = CoreClient::from_provider_metadata(
provider_metadata,
ClientId::new(config.oidc_client_id.to_string()),
// The secret may be empty / ommitted if auth provider supports PKCE
if config.oidc_client_secret.is_empty() {
None
} else {
Some(ClientSecret::new(config.oidc_client_secret.to_string()))
},
)
// Set the URL the user will be redirected to after the authorization process.
.set_redirect_uri(RedirectUrl::new(format!(
"{}/auth/oidc/callback",
core_config().host
))?);
oidc_client().store(Some(client.into()));
Ok(())
.context("Failed to init default OIDC client")
.unwrap();
}

View File

@@ -1,20 +1,20 @@
use std::sync::OnceLock;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use axum::{
Router, extract::Query, response::Redirect, routing::get,
extract::Query, response::Redirect, routing::get, Router,
};
use client::oidc_client;
use client::default_oidc_client;
use dashmap::DashMap;
use komodo_client::entities::{
komodo_timestamp,
user::{User, UserConfig},
};
use mungos::mongodb::bson::{Document, doc};
use mungos::mongodb::bson::{doc, Document};
use openidconnect::{
AccessTokenHash, AuthorizationCode, CsrfToken, Nonce,
OAuth2TokenResponse, PkceCodeChallenge, PkceCodeVerifier, Scope,
TokenResponse, core::CoreAuthenticationFlow,
core::CoreAuthenticationFlow, AccessTokenHash, AuthorizationCode,
CsrfToken, Nonce, OAuth2TokenResponse, PkceCodeChallenge,
PkceCodeVerifier, Scope, TokenResponse,
};
use reqwest::StatusCode;
use serde::Deserialize;
@@ -29,28 +29,16 @@ use super::RedirectQuery;
pub mod client;
fn reqwest_client() -> &'static reqwest::Client {
static REQWEST: OnceLock<reqwest::Client> = OnceLock::new();
REQWEST.get_or_init(|| {
reqwest::Client::builder()
.redirect(reqwest::redirect::Policy::none())
.build()
.expect("Invalid OIDC reqwest client")
})
}
/// CSRF tokens can only be used once from the callback,
/// and must be used within this timeframe
const CSRF_VALID_FOR_MS: i64 = 120_000; // 2 minutes for user to log in.
type RedirectUrl = Option<String>;
/// Maps the csrf secrets to other information added in the "login" method (before auth provider redirect).
/// This information is retrieved in the "callback" method (after auth provider redirect).
type VerifierMap =
type CsrfMap =
DashMap<String, (PkceCodeVerifier, Nonce, RedirectUrl, i64)>;
fn verifier_tokens() -> &'static VerifierMap {
static VERIFIERS: OnceLock<VerifierMap> = OnceLock::new();
VERIFIERS.get_or_init(Default::default)
fn csrf_verifier_tokens() -> &'static CsrfMap {
static CSRF: OnceLock<CsrfMap> = OnceLock::new();
CSRF.get_or_init(Default::default)
}
pub fn router() -> Router {
@@ -73,10 +61,10 @@ pub fn router() -> Router {
async fn login(
Query(RedirectQuery { redirect }): Query<RedirectQuery>,
) -> anyhow::Result<Redirect> {
let client = oidc_client().load();
let client =
client.as_ref().context("OIDC Client not configured")?;
default_oidc_client().context("OIDC Client not configured")?;
// Generate a PKCE challenge.
let (pkce_challenge, pkce_verifier) =
PkceCodeChallenge::new_random_sha256();
@@ -87,13 +75,13 @@ async fn login(
CsrfToken::new_random,
Nonce::new_random,
)
.set_pkce_challenge(pkce_challenge)
.add_scope(Scope::new("openid".to_string()))
.add_scope(Scope::new("email".to_string()))
.set_pkce_challenge(pkce_challenge)
.url();
// Data inserted here will be matched on callback side for csrf protection.
verifier_tokens().insert(
csrf_verifier_tokens().insert(
csrf_token.secret().clone(),
(
pkce_verifier,
@@ -135,9 +123,8 @@ struct CallbackQuery {
async fn callback(
Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
let client = oidc_client().load();
let client =
client.as_ref().context("OIDC Client not configured")?;
default_oidc_client().context("OIDC Client not configured")?;
if let Some(e) = query.error {
return Err(anyhow!("Provider returned error: {e}"));
@@ -149,9 +136,9 @@ async fn callback(
);
let (_, (pkce_verifier, nonce, redirect, valid_until)) =
verifier_tokens()
csrf_verifier_tokens()
.remove(state.secret())
.context("CSRF token invalid")?;
.context("CSRF Token invalid")?;
if komodo_timestamp() > valid_until {
return Err(anyhow!(
@@ -161,9 +148,9 @@ async fn callback(
let token_response = client
.exchange_code(AuthorizationCode::new(code))
.context("Failed to get Oauth token at exchange code")?
// Set the PKCE code verifier.
.set_pkce_verifier(pkce_verifier)
.request_async(reqwest_client())
.request_async(openidconnect::reqwest::async_http_client)
.await
.context("Failed to get Oauth token")?;
@@ -186,7 +173,7 @@ async fn callback(
let claims = id_token
.claims(&verifier, &nonce)
.context("Failed to verify token claims. This issue may be temporary (60 seconds max).")?;
.context("Failed to verify token claims")?;
// Verify the access token hash to ensure that the access token hasn't been substituted for
// another user's.
@@ -194,8 +181,7 @@ async fn callback(
{
let actual_access_token_hash = AccessTokenHash::from_token(
token_response.access_token(),
id_token.signing_alg()?,
id_token.signing_key(&verifier)?,
&id_token.signing_alg()?,
)?;
if actual_access_token_hash != *expected_access_token_hash {
return Err(anyhow!("Invalid access token"));
@@ -216,7 +202,7 @@ async fn callback(
let jwt = match user {
Some(user) => jwt_client()
.encode(user.id)
.generate(user.id)
.context("failed to generate jwt")?,
None => {
let ts = komodo_timestamp();
@@ -272,7 +258,7 @@ async fn callback(
.context("inserted_id is not ObjectId")?
.to_string();
jwt_client()
.encode(user_id)
.generate(user_id)
.context("failed to generate jwt")?
}
};

View File

@@ -1,22 +1,22 @@
use std::{str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use aws_config::{BehaviorVersion, Region};
use aws_sdk_ec2::{
Client,
types::{
BlockDeviceMapping, EbsBlockDevice,
InstanceNetworkInterfaceSpecification, InstanceStateChange,
InstanceStateName, InstanceStatus, InstanceType, ResourceType,
Tag, TagSpecification, VolumeType,
},
Client,
};
use base64::Engine;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
komodo_timestamp,
server_template::aws::AwsServerTemplateConfig,
ResourceTarget,
};
use crate::{alert::send_alerts, config::core_config};
@@ -29,40 +29,20 @@ pub struct Ec2Instance {
pub ip: String,
}
/// Provides credentials in the core config file to the AWS client
#[derive(Debug)]
struct CredentialsFromConfig;
impl aws_credential_types::provider::ProvideCredentials
for CredentialsFromConfig
{
fn provide_credentials<'a>(
&'a self,
) -> aws_credential_types::provider::future::ProvideCredentials<'a>
where
Self: 'a,
{
aws_credential_types::provider::future::ProvideCredentials::new(
async {
let config = core_config();
Ok(aws_credential_types::Credentials::new(
&config.aws.access_key_id,
&config.aws.secret_access_key,
None,
None,
"komodo-config",
))
},
)
}
}
#[instrument]
async fn create_ec2_client(region: String) -> Client {
// There may be a better way to pass these keys to client
std::env::set_var(
"AWS_ACCESS_KEY_ID",
&core_config().aws.access_key_id,
);
std::env::set_var(
"AWS_SECRET_ACCESS_KEY",
&core_config().aws.secret_access_key,
);
let region = Region::new(region);
let config = aws_config::defaults(BehaviorVersion::latest())
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
.region(region)
.credentials_provider(CredentialsFromConfig)
.load()
.await;
Client::new(&config)

View File

@@ -1,7 +1,7 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use axum::http::{HeaderName, HeaderValue};
use reqwest::{RequestBuilder, StatusCode};
use serde::{Serialize, de::DeserializeOwned};
use serde::{de::DeserializeOwned, Serialize};
use super::{
common::{

View File

@@ -3,7 +3,7 @@ use std::{
time::Duration,
};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use komodo_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,

View File

@@ -182,8 +182,6 @@ pub fn core_config() -> &'static CoreConfig {
.unwrap_or(config.disable_user_registration),
disable_non_admin_create: env.komodo_disable_non_admin_create
.unwrap_or(config.disable_non_admin_create),
lock_login_credentials_for: env.komodo_lock_login_credentials_for
.unwrap_or(config.lock_login_credentials_for),
local_auth: env.komodo_local_auth
.unwrap_or(config.local_auth),
logging: LogConfig {

View File

@@ -89,9 +89,7 @@ impl DbClient {
client = client.address(address);
}
_ => {
error!(
"config.mongo not configured correctly. must pass either config.mongo.uri, or config.mongo.address + config.mongo.username? + config.mongo.password?"
);
error!("config.mongo not configured correctly. must pass either config.mongo.uri, or config.mongo.address + config.mongo.username? + config.mongo.password?");
std::process::exit(1)
}
}

View File

@@ -84,8 +84,8 @@ pub struct UpdateGuard<'a, States: Default + Send + 'static>(
&'a Mutex<States>,
);
impl<States: Default + Send + 'static> Drop
for UpdateGuard<'_, States>
impl<'a, States: Default + Send + 'static> Drop
for UpdateGuard<'a, States>
{
fn drop(&mut self) {
let mut lock = match self.0.lock() {

View File

@@ -1,27 +1,27 @@
use std::time::Duration;
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use formatting::muted;
use komodo_client::entities::{
Version,
builder::{AwsBuilderConfig, Builder, BuilderConfig},
komodo_timestamp,
server::Server,
server_template::aws::AwsServerTemplateConfig,
update::{Log, Update},
Version,
};
use periphery_client::{
PeripheryClient,
api::{self, GetVersionResponse},
PeripheryClient,
};
use crate::{
cloud::{
BuildCleanupData,
aws::ec2::{
Ec2Instance, launch_ec2_instance,
terminate_ec2_instance_with_retry,
launch_ec2_instance, terminate_ec2_instance_with_retry,
Ec2Instance,
},
BuildCleanupData,
},
config::core_config,
helpers::update::update_update,

View File

@@ -9,9 +9,9 @@ pub struct Cache<K: PartialEq + Eq + Hash, T: Clone + Default> {
}
impl<
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default,
> Cache<K, T>
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default,
> Cache<K, T>
{
#[instrument(level = "debug", skip(self))]
pub async fn get(&self, key: &K) -> Option<T> {
@@ -70,9 +70,9 @@ impl<
}
impl<
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default + Busy,
> Cache<K, T>
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
T: Clone + Default + Busy,
> Cache<K, T>
{
#[instrument(level = "debug", skip(self))]
pub async fn busy(&self, id: &K) -> bool {

View File

@@ -1,11 +1,11 @@
use std::sync::OnceLock;
use komodo_client::entities::update::{Update, UpdateListItem};
use tokio::sync::{Mutex, broadcast};
use tokio::sync::{broadcast, Mutex};
/// A channel sending (build_id, update_id)
pub fn build_cancel_channel()
-> &'static BroadcastChannel<(String, Update)> {
pub fn build_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
static BUILD_CANCEL_CHANNEL: OnceLock<
BroadcastChannel<(String, Update)>,
> = OnceLock::new();
@@ -13,8 +13,8 @@ pub fn build_cancel_channel()
}
/// A channel sending (repo_id, update_id)
pub fn repo_cancel_channel()
-> &'static BroadcastChannel<(String, Update)> {
pub fn repo_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
static REPO_CANCEL_CHANNEL: OnceLock<
BroadcastChannel<(String, Update)>,
> = OnceLock::new();

View File

@@ -1,7 +1,7 @@
use std::collections::HashSet;
use anyhow::Context;
use komodo_client::entities::{SystemCommand, update::Update};
use komodo_client::entities::{update::Update, SystemCommand};
use super::query::VariablesAndSecrets;

View File

@@ -1,27 +1,27 @@
use std::{str::FromStr, time::Duration};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use komodo_client::{
api::write::{CreateBuilder, CreateServer},
entities::{
ResourceTarget,
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
komodo_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::Log,
user::{User, system_user},
user::{system_user, User},
ResourceTarget,
},
};
use mongo_indexed::Document;
use mungos::{
find::find_collect,
mongodb::bson::{Bson, doc, oid::ObjectId, to_document},
mongodb::bson::{doc, oid::ObjectId, to_document, Bson},
};
use periphery_client::PeripheryClient;
use rand::Rng;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use resolver_api::Resolve;
use crate::{
@@ -54,8 +54,8 @@ pub fn empty_or_only_spaces(word: &str) -> bool {
}
pub fn random_string(length: usize) -> String {
rand::rng()
.sample_iter(&rand::distr::Alphanumeric)
thread_rng()
.sample_iter(&Alphanumeric)
.take(length)
.map(char::from)
.collect()
@@ -208,9 +208,7 @@ pub async fn startup_cleanup() {
async fn startup_in_progress_update_cleanup() {
let log = Log::error(
"Komodo shutdown",
String::from(
"Komodo shutdown during execution. If this is a build, the builder may not have been terminated.",
),
String::from("Komodo shutdown during execution. If this is a build, the builder may not have been terminated.")
);
// This static log won't fail to serialize, unwrap ok.
let log = to_document(&log).unwrap();
@@ -321,10 +319,7 @@ pub async fn ensure_first_server_and_builder() {
{
Ok(server) => server,
Err(e) => {
error!(
"Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
e.error
);
error!("Failed to initialize 'first_server'. Failed to CreateServer. {:#}", e.error);
return;
}
}
@@ -347,9 +342,6 @@ pub async fn ensure_first_server_and_builder() {
})
.await
{
error!(
"Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}",
e.error
);
error!("Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}", e.error);
}
}
}

View File

@@ -1,7 +1,7 @@
use std::time::{Duration, Instant};
use anyhow::{Context, anyhow};
use formatting::{Color, bold, colored, format_serror, muted};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use komodo_client::{
api::execute::*,
@@ -25,7 +25,7 @@ use crate::{
execute::{ExecuteArgs, ExecuteRequest},
write::WriteArgs,
},
resource::{KomodoResource, list_full_for_user_using_pattern},
resource::{list_full_for_user_using_pattern, KomodoResource},
state::db_client,
};
@@ -206,7 +206,7 @@ async fn execute_stage(
join_all(futures)
.await
.into_iter()
.collect::<anyhow::Result<Vec<_>>>()?;
.collect::<anyhow::Result<_>>()?;
Ok(())
}
@@ -1259,7 +1259,7 @@ impl ExtendBatch for BatchDeployStack {
fn single_execution(stack: String) -> Execution {
Execution::DeployStack(DeployStack {
stack,
services: Vec::new(),
service: None,
stop_time: None,
})
}
@@ -1280,7 +1280,7 @@ impl ExtendBatch for BatchDestroyStack {
fn single_execution(stack: String) -> Execution {
Execution::DestroyStack(DestroyStack {
stack,
services: Vec::new(),
service: None,
remove_orphans: false,
stop_time: None,
})

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use async_timing_util::{
ONE_DAY_MS, Timelength, unix_timestamp_ms, wait_until_timelength,
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS,
};
use futures::future::join_all;
use mungos::{find::find_collect, mongodb::bson::doc};
@@ -34,9 +34,8 @@ async fn prune_images() -> anyhow::Result<()> {
.await
.context("failed to get servers from db")?
.into_iter()
.filter(|server| {
server.config.enabled && server.config.auto_prune
})
// This could be done in the mongo query, but rather have rust type system guarantee this.
.filter(|server| server.config.auto_prune)
.map(|server| async move {
(
async {

View File

@@ -1,8 +1,7 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use komodo_client::entities::{
Operation, ResourceTarget, ResourceTargetVariant,
action::Action,
alerter::Alerter,
build::Build,
@@ -18,14 +17,15 @@ use komodo_client::entities::{
sync::ResourceSync,
tag::Tag,
update::Update,
user::{User, admin_service_user},
user::{admin_service_user, User},
user_group::UserGroup,
variable::Variable,
Operation, ResourceTarget, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{
bson::{Document, doc, oid::ObjectId},
bson::{doc, oid::ObjectId, Document},
options::FindOneOptions,
},
};
@@ -359,8 +359,8 @@ pub struct VariablesAndSecrets {
pub secrets: HashMap<String, String>,
}
pub async fn get_variables_and_secrets()
-> anyhow::Result<VariablesAndSecrets> {
pub async fn get_variables_and_secrets(
) -> anyhow::Result<VariablesAndSecrets> {
let variables = find_collect(&db_client().variables, None, None)
.await
.context("failed to get all variables from db")?;

View File

@@ -1,6 +1,5 @@
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTarget,
action::Action,
alerter::Alerter,
build::Build,
@@ -14,6 +13,7 @@ use komodo_client::entities::{
sync::ResourceSync,
update::{Update, UpdateListItem},
user::User,
Operation, ResourceTarget,
};
use mungos::{
by_id::{find_one_by_id, update_one_by_id},
@@ -263,7 +263,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDeploy(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::PullDeployment(data) => (
Operation::PullDeployment,
@@ -308,7 +308,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDestroyDeployment(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
// Build
@@ -319,7 +319,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchRunBuild(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::CancelBuild(data) => (
Operation::CancelBuild,
@@ -336,7 +336,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchCloneRepo(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::PullRepo(data) => (
Operation::PullRepo,
@@ -345,7 +345,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchPullRepo(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::BuildRepo(data) => (
Operation::BuildRepo,
@@ -354,7 +354,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchBuildRepo(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::CancelRepoBuild(data) => (
Operation::CancelRepoBuild,
@@ -371,7 +371,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchRunProcedure(_) => {
return Ok(Default::default());
return Ok(Default::default())
}
// Action
@@ -382,7 +382,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchRunAction(_) => {
return Ok(Default::default());
return Ok(Default::default())
}
// Server template
@@ -405,7 +405,7 @@ pub async fn init_execution_update(
// Stack
ExecuteRequest::DeployStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::DeployStackService
} else {
Operation::DeployStack
@@ -415,7 +415,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDeployStack(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::DeployStackIfChanged(data) => (
Operation::DeployStack,
@@ -424,10 +424,10 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDeployStackIfChanged(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
ExecuteRequest::StartStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::StartStackService
} else {
Operation::StartStack
@@ -437,7 +437,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::PullStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::PullStackService
} else {
Operation::PullStack
@@ -447,7 +447,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::RestartStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::RestartStackService
} else {
Operation::RestartStack
@@ -457,7 +457,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::PauseStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::PauseStackService
} else {
Operation::PauseStack
@@ -467,7 +467,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::UnpauseStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::UnpauseStackService
} else {
Operation::UnpauseStack
@@ -477,7 +477,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::StopStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::StopStackService
} else {
Operation::StopStack
@@ -487,7 +487,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::DestroyStack(data) => (
if !data.services.is_empty() {
if data.service.is_some() {
Operation::DestroyStackService
} else {
Operation::DestroyStack
@@ -497,7 +497,7 @@ pub async fn init_execution_update(
),
),
ExecuteRequest::BatchDestroyStack(_data) => {
return Ok(Default::default());
return Ok(Default::default())
}
// Alerter

View File

@@ -1,4 +1,4 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use axum::http::HeaderMap;
use hex::ToHex;
use hmac::{Hmac, Mac};

View File

@@ -1,4 +1,4 @@
use anyhow::{Context, anyhow};
use anyhow::{anyhow, Context};
use serde::Deserialize;
use crate::{

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use axum::{Router, http::HeaderMap};
use axum::{http::HeaderMap, Router};
use komodo_client::entities::resource::Resource;
use tokio::sync::Mutex;

View File

@@ -22,7 +22,7 @@ use crate::{
helpers::update::init_execution_update,
};
use super::{ANY_BRANCH, ListenerLockCache};
use super::{ListenerLockCache, ANY_BRANCH};
// =======
// BUILD
@@ -231,7 +231,7 @@ impl StackExecution for DeployStack {
if stack.config.webhook_force_deploy {
let req = ExecuteRequest::DeployStack(DeployStack {
stack: stack.id,
services: Vec::new(),
service: None,
stop_time: None,
});
let update = init_execution_update(&req, &user).await?;

View File

@@ -1,4 +1,4 @@
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use komodo_client::entities::{
action::Action, build::Build, procedure::Procedure, repo::Repo,
resource::Resource, stack::Stack, sync::ResourceSync,
@@ -11,13 +11,13 @@ use tracing::Instrument;
use crate::resource::KomodoResource;
use super::{
CustomSecret, VerifyBranch, VerifySecret,
resources::{
RepoWebhookOption, StackWebhookOption, SyncWebhookOption,
handle_action_webhook, handle_build_webhook,
handle_procedure_webhook, handle_repo_webhook,
handle_stack_webhook, handle_sync_webhook,
handle_stack_webhook, handle_sync_webhook, RepoWebhookOption,
StackWebhookOption, SyncWebhookOption,
},
CustomSecret, VerifyBranch, VerifySecret,
};
#[derive(Deserialize)]

View File

@@ -40,8 +40,8 @@ async fn app() -> anyhow::Result<()> {
tokio::join!(
// Init db_client check to crash on db init failure
state::init_db_client(),
// Manage OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::spawn_oidc_client_management()
// Init default OIDC client (defined in config / env vars / compose secret file)
auth::oidc::client::init_default_oidc_client()
);
tokio::join!(
// Maybe initialize first server
@@ -59,6 +59,7 @@ async fn app() -> anyhow::Result<()> {
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();
resource::spawn_action_state_refresh_loop();
resource::spawn_resource_sync_state_refresh_loop();
helpers::prune::spawn_prune_loop();
// Setup static frontend services

View File

@@ -1,9 +1,9 @@
use std::collections::HashMap;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
deployment::{Deployment, DeploymentState},
ResourceTarget,
};
use crate::{

View File

@@ -30,8 +30,8 @@ pub async fn check_alerts(ts: i64) {
}
#[instrument(level = "debug")]
async fn get_all_servers_map()
-> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
async fn get_all_servers_map(
) -> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
{
let servers = resource::list_full_for_user::<Server>(
ResourceQuery::default(),

View File

@@ -3,10 +3,10 @@ use std::{collections::HashMap, path::PathBuf, str::FromStr};
use anyhow::Context;
use derive_variants::ExtractVariant;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
komodo_timestamp, optional_string,
server::{Server, ServerState},
ResourceTarget,
};
use mongo_indexed::Indexed;
use mungos::{
@@ -85,9 +85,7 @@ pub async fn alert_servers(
id, name, region, ..
} => (id, name, region),
data => {
error!(
"got incorrect alert data in ServerStatus handler. got {data:?}"
);
error!("got incorrect alert data in ServerStatus handler. got {data:?}");
continue;
}
};
@@ -532,8 +530,8 @@ async fn resolve_alerts(alerts: &[(Alert, SendAlerts)]) {
}
#[instrument(level = "debug")]
async fn get_open_alerts()
-> anyhow::Result<(OpenAlertMap, OpenDiskAlertMap)> {
async fn get_open_alerts(
) -> anyhow::Result<(OpenAlertMap, OpenDiskAlertMap)> {
let alerts = find_collect(
&db_client().alerts,
doc! { "resolved": false },

View File

@@ -1,9 +1,9 @@
use std::collections::HashMap;
use komodo_client::entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
stack::{Stack, StackState},
ResourceTarget,
};
use crate::{

View File

@@ -6,8 +6,8 @@ use komodo_client::entities::{
stack::ComposeProject,
};
use periphery_client::{
PeripheryClient,
api::{GetDockerLists, GetDockerListsResponse},
PeripheryClient,
};
pub async fn get_docker_lists(

View File

@@ -7,7 +7,8 @@ use komodo_client::entities::{
container::ContainerListItem, image::ImageListItem,
network::NetworkListItem, volume::VolumeListItem,
},
komodo_timestamp, optional_string,
komodo_timestamp,
optional_string,
server::{Server, ServerHealth, ServerState},
stack::{ComposeProject, StackService, StackState},
stats::SystemStats,

View File

@@ -1,5 +1,5 @@
use komodo_client::entities::stats::{
SystemStatsRecord, TotalDiskUsage, sum_disk_usage,
sum_disk_usage, SystemStatsRecord, TotalDiskUsage,
};
use crate::state::{db_client, server_status_cache};

View File

@@ -7,7 +7,6 @@ use anyhow::Context;
use komodo_client::{
api::execute::{Deploy, DeployStack},
entities::{
ResourceTarget,
alert::{Alert, AlertData, SeverityLevel},
build::Build,
deployment::{Deployment, DeploymentImage, DeploymentState},
@@ -18,6 +17,7 @@ use komodo_client::{
komodo_timestamp,
stack::{Stack, StackService, StackServiceNames, StackState},
user::auto_redeploy_user,
ResourceTarget,
},
};
@@ -222,8 +222,8 @@ pub async fn update_deployment_cache(
}
/// (StackId, Service)
fn stack_alert_sent_cache()
-> &'static Mutex<HashSet<(String, String)>> {
fn stack_alert_sent_cache(
) -> &'static Mutex<HashSet<(String, String)>> {
static CACHE: OnceLock<Mutex<HashSet<(String, String)>>> =
OnceLock::new();
CACHE.get_or_init(Default::default)
@@ -322,8 +322,8 @@ pub async fn update_stack_cache(
}
}).collect::<Vec<_>>();
let mut update_available = false;
let mut images_with_update = Vec::new();
let mut services_to_update = Vec::new();
for service in services_with_containers.iter() {
if service.update_available {
@@ -336,7 +336,7 @@ pub async fn update_stack_cache(
.map(|c| c.state == ContainerStateStatusEnum::Running)
.unwrap_or_default()
{
services_to_update.push(service.service.clone());
update_available = true
}
}
}
@@ -346,7 +346,7 @@ pub async fn update_stack_cache(
&services,
containers,
);
if !services_to_update.is_empty()
if update_available
&& stack.config.auto_update
&& state == StackState::Running
&& !action_states()
@@ -358,16 +358,11 @@ pub async fn update_stack_cache(
{
let id = stack.id.clone();
let server_name = server_name.clone();
let services = if stack.config.auto_update_all_services {
Vec::new()
} else {
services_to_update
};
tokio::spawn(async move {
match execute::inner_handler(
ExecuteRequest::DeployStack(DeployStack {
stack: stack.name.clone(),
services,
service: None,
stop_time: None,
}),
auto_redeploy_user().to_owned(),

View File

@@ -2,7 +2,6 @@ use std::time::Duration;
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTargetVariant,
action::{
Action, ActionConfig, ActionConfigDiff, ActionInfo,
ActionListItem, ActionListItemInfo, ActionQuerySpecifics,
@@ -11,10 +10,11 @@ use komodo_client::entities::{
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{Collection, bson::doc, options::FindOneOptions},
mongodb::{bson::doc, options::FindOneOptions, Collection},
};
use crate::state::{action_state_cache, action_states, db_client};

View File

@@ -1,6 +1,5 @@
use derive_variants::ExtractVariant;
use komodo_client::entities::{
Operation, ResourceTargetVariant,
alerter::{
Alerter, AlerterConfig, AlerterConfigDiff, AlerterListItem,
AlerterListItemInfo, AlerterQuerySpecifics, PartialAlerterConfig,
@@ -8,6 +7,7 @@ use komodo_client::entities::{
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::mongodb::Collection;

View File

@@ -2,7 +2,6 @@ use std::time::Duration;
use anyhow::Context;
use komodo_client::entities::{
Operation, ResourceTargetVariant,
build::{
Build, BuildConfig, BuildConfigDiff, BuildInfo, BuildListItem,
BuildListItemInfo, BuildQuerySpecifics, BuildState,
@@ -14,10 +13,11 @@ use komodo_client::entities::{
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{Collection, bson::doc, options::FindOptions},
mongodb::{bson::doc, options::FindOptions, Collection},
};
use crate::{

View File

@@ -1,6 +1,5 @@
use anyhow::Context;
use komodo_client::entities::{
MergePartial, Operation, ResourceTargetVariant,
builder::{
Builder, BuilderConfig, BuilderConfigDiff, BuilderConfigVariant,
BuilderListItem, BuilderListItemInfo, BuilderQuerySpecifics,
@@ -11,10 +10,11 @@ use komodo_client::entities::{
server::Server,
update::Update,
user::User,
MergePartial, Operation, ResourceTargetVariant,
};
use mungos::mongodb::{
bson::{doc, to_document, Document},
Collection,
bson::{Document, doc, to_document},
};
use crate::state::db_client;

Some files were not shown because too many files have changed in this diff Show More