Compare commits

..

43 Commits

Author SHA1 Message Date
Maxwell Becker
d05c81864e 1.16.3 (#150)
* refactor listener api implementation for Gitlab integration

* version 1.16.3

* builder delete id link cleanup

* refactor and add "__ALL__" branch to avoid branch filtering

* frontend config the webhook url

* action webhook config

* clean up webhook url copy

* add __ALL__ branch switch for Actions / Procedures
2024-10-24 16:03:00 -07:00
mbecker20
f1a09f34ab tweak dev docs and runfile 2024-10-22 17:04:49 -04:00
mbecker20
23c6e6306d fix usage of runnables-cli in dev docs 2024-10-22 16:36:25 -04:00
mbecker20
800da90561 tweaks to dev docs 2024-10-22 15:25:33 -04:00
mbecker20
b24bf6ed89 fix docsite broken links 2024-10-22 15:17:10 -04:00
Matt Foxx
d66a781a13 docs: Add development docs (#136)
* docs: Add development POC

* docs: Flesh out full build/run steps

* feat: Add mergeable compose file to expose port and internal periphery url

* feat: Add .devcontainer and VSCode Tasks for developing Komodo

* Make cargo cache persistent in devcontainer

* Add deno to devcontainer

* Update tasks to include TS client copy to frontend before run

* Recommend extensions for used dependencies in vscode workspace

All extensions recommended are included in the devcontainer. This makes it easier for users not using devcontainer to get lang support.

* Update local `run` sequence for development docs
2024-10-22 12:09:26 -07:00
Maxwell Becker
f9b2994d44 1.16.2 (#145)
* Env vars written using same quotes (single vs double) as the user passes

* fmt

* trim start matches '-'

* ts client version
2024-10-22 11:41:17 -07:00
mbecker20
c0d6d96b64 get username works for service users 2024-10-22 03:36:20 -04:00
mbecker20
34496b948a bump ts client to 1.16.1 2024-10-22 02:58:42 -04:00
mbecker20
90c6adf923 fix periphery installer force file recreation command 2024-10-22 02:55:39 -04:00
mbecker20
3b72dc65cc remove "Overviews" label from sidebar 2024-10-22 02:27:22 -04:00
mbecker20
05f38d02be bump version to 1.16.1 2024-10-22 02:21:16 -04:00
Maxwell Becker
ea5506c202 1.16.1 (#143)
* ensure sync state cache is refreshed on sync create / copy

* clean up resources post_create

* show sidebar if element length > 1

* update `run_komodo_command` command

* rename all resources

* refresh repo cache after clone / pull

* improve rename repo log
2024-10-21 23:19:40 -07:00
mbecker20
64b0a5c9d2 delete unrelated caddy compose 2024-10-21 00:30:54 -04:00
mbecker20
93cc6a3a6e Add running Action to dashboard "Active" 2024-10-21 00:21:17 -04:00
mbecker20
7ae69cf33b ignore top level return linting 2024-10-20 05:10:28 -04:00
mbecker20
404e00cc64 move action info inline 2024-10-20 04:43:29 -04:00
mbecker20
6fe5bc7420 properly host client lib for deno importing (types working) 2024-10-20 03:17:58 -04:00
mbecker20
82324b00ee typescript komodo_client v 1.16.0 2024-10-20 02:35:43 -04:00
Maxwell Becker
5daba3a557 1.16.0 (#140)
* consolidate deserializers

* key value list doc

* use string list deserializers for all entity Vec<String>

* add additional env files support

* plumbing for Action resource

* js client readme indentation

* regen lock

* add action UI

* action backend

* start on action frontend

* update lock

* get up to speed

* get action started

* clean up default action file

* seems to work

* toml export include action

* action works

* action works part 2

* bump rust version to 1.82.0

* copy deno bin from bin image

* action use local dir

* update not having changes doesn't return error

* format with prettier

* support yaml formatting with prettier

* variable no change is Ok
2024-10-19 23:27:28 -07:00
mbecker20
020cdc06fd remove migrator link in readme 2024-10-18 21:23:02 -04:00
Maxwell Becker
cb270f4dff 1.15.12 (#139)
* add containers link to mobile dropdown

* fix update / alert not showing permission issue

* prevent disk alert back and forth

* improve user group pending toml
2024-10-18 17:14:22 -07:00
Matt Foxx
21666cf9b3 feat: Add docs link to topbar (#134) 2024-10-18 16:10:01 -07:00
mbecker20
a417926690 1.15.11 allow adding stack to user group 2024-10-16 23:09:06 -04:00
mbecker20
293b36fae4 Allow adding Stack to User Group 2024-10-16 23:08:44 -04:00
mbecker20
dca37e9ba8 1.15.10 connect with http using DOCKER_HOST 2024-10-16 22:16:07 -04:00
Morgan Wyatt
1cc302fcbf Update docker.rs to allow http docker socket connection (#131)
* Update docker.rs to allow http docker socket connection

Add or_else to allow attempt to connect to docker socket proxy via http if local connection fails

* Update docker.rs

Change two part connection to use connect_with_defaults instead, per review on PR.
2024-10-16 19:13:19 -07:00
mbecker20
febcf739d0 Remove Comma from installer: thanks @PiotrBzdrega 2024-10-16 10:43:54 -04:00
mbecker20
cb79e00794 update systemd service file 2024-10-15 17:35:54 -04:00
mbecker20
869b397596 force service file recreation docs 2024-10-15 17:25:29 -04:00
Maxwell Becker
41d1ff9760 1.15.9 (#127)
* add close alert threshold to prevent Ok - Warning back and forth

* remove part about repo being deleted, no longer behavior

* resource sync share general common

* remove this changelog. use releases

* remove changelog from readme

* write commit file clean up path

* docs: supports any git provider repo

* fix docs: authorization

* multiline command supports escaped newlines

* move webhook to build config advanced

* parser comments with escaped newline

* improve parser

* save use Enter. escape monaco using escape

* improve logic when deployment / stack action buttons shown

* used_mem = total - available

* Fix unrecognized path have 404

* webhooks will 404 if misconfigured

* move update logger / alerter

* delete migrator

* update examples

* publish typescript client komodo_client
2024-10-14 23:04:49 -07:00
mbecker20
dfafadf57b demo / build username pw 2024-10-14 11:49:44 -04:00
mbecker20
538a79b8b5 fix upausing all container action state 2024-10-13 18:11:09 -04:00
Maxwell Becker
5088dc5c3c 1.15.8 (#124)
* fix all containers restart and unpause

* add CommitSync to Procedure

* validate resource query tags causes failure on non exist

* files on host init working. match tags fail if tag doesnt exist

* intelligent sync match tag selector

* fix linting

* Wait for user initialize file on host
2024-10-13 15:03:16 -07:00
mbecker20
581d7e0b2c fix Procedure sync log 2024-10-13 04:21:03 -04:00
mbecker20
657298041f remove unneeded syncs volume 2024-10-13 04:03:09 -04:00
mbecker20
d71e9dca11 fix version 2024-10-13 03:21:56 -04:00
Maxwell Becker
165131bdf8 1.15.7 (#119)
* 1.15.7-dev ensure git config set

* add username to commit msg
2024-10-13 00:01:14 -07:00
mbecker20
0a81d2a0d0 add labels to mongo compose 2024-10-13 00:57:13 -04:00
Maxwell Becker
44ab5eb804 1.15.6 (#117)
* add periphery.skip label, skip in StopAllContainers

* add core config sync directory

* deploy stack if changed

* fix stack env_file_path when git repo and using run_directory

* deploy stack if changed

* write sync contents

* commit to git based sync, managed git based sync

* can sync non UI defined resource syncs

* sync UI control

* clippy

* init new stack compose file in repo

* better error message when attached Server / Builder invalid

* specify multiple resource file paths (mixed files + folders)

* use react charts

* tweak stats charts

* add Containers page

* 1.15.6

* stack deploy check if deployes vs remote has changed

* improve ux with loading indicators

* sync diff accounts for deploy / after

* fix new chart time axes
2024-10-12 21:42:46 -07:00
Maxwell Becker
e3d8e603ec 1.15.5 (#116)
* 1.15.5
- Update your user's username and password
- **Admin**: Delete Users

* update username / password / delete user backend

* bump version

* alerter default disabled

* delete users and update username / password

* set password "" after update
2024-10-11 19:42:43 -07:00
mbecker20
8b5c179473 account recover note 2024-10-11 19:16:01 -04:00
mbecker20
8582bc92da fix Destroy Before Deploy config 2024-10-10 04:17:17 -04:00
360 changed files with 26011 additions and 11066 deletions

View File

@@ -0,0 +1,33 @@
services:
  dev:
    image: mcr.microsoft.com/devcontainers/rust:1-1-bullseye
    volumes:
      # Mount the root folder that contains .git
      - ../:/workspace:cached
      - /var/run/docker.sock:/var/run/docker.sock
      - /proc:/proc
      - repos:/etc/komodo/repos
      - stacks:/etc/komodo/stacks
    command: sleep infinity
    ports:
      - "9121:9121"
    environment:
      KOMODO_FIRST_SERVER: http://localhost:8120
      KOMODO_DATABASE_ADDRESS: db
      # Booleans must be quoted: the Compose spec requires environment
      # map values to be strings / numbers / null, and bare YAML `true`
      # fails `docker compose` validation.
      KOMODO_ENABLE_NEW_USERS: "true"
      KOMODO_LOCAL_AUTH: "true"
      KOMODO_JWT_SECRET: a_random_secret
    links:
      - db
    # ...
  db:
    extends:
      file: ../test.compose.yaml
      service: ferretdb

volumes:
  data:
  repo-cache:
  repos:
  stacks:

View File

@@ -0,0 +1,46 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/rust
{
"name": "Komodo",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
//"image": "mcr.microsoft.com/devcontainers/rust:1-1-bullseye",
"dockerComposeFile": ["dev.compose.yaml"],
"workspaceFolder": "/workspace",
"service": "dev",
// Features to add to the dev container. More info: https://containers.dev/features.
"features": {
"ghcr.io/devcontainers/features/node:1": {
"version": "18.18.0"
},
"ghcr.io/devcontainers-community/features/deno:1": {
}
},
// Use 'mounts' to make the cargo cache persistent in a Docker Volume.
"mounts": [
{
"source": "devcontainer-cargo-cache-${devcontainerId}",
"target": "/usr/local/cargo",
"type": "volume"
}
],
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [
9121
],
// Use 'postCreateCommand' to run commands after the container is created.
"postCreateCommand": "./.devcontainer/postCreate.sh",
"runServices": [
"db"
]
// Configure tool-specific properties.
// "customizations": {},
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}

3
.devcontainer/postCreate.sh Executable file
View File

@@ -0,0 +1,3 @@
#!/bin/sh
# Devcontainer post-create hook (see devcontainer.json "postCreateCommand").
# Installs the typeshare CLI — presumably used by the TS client type
# generation step (client/core/ts/generate_types.mjs); confirm if that changes.
# Exit non-zero on failure so a broken setup is surfaced instead of silently
# leaving the container half-initialized.
set -e
cargo install typeshare-cli

5
.gitignore vendored
View File

@@ -8,7 +8,4 @@ dist
.DS_Store
creds.toml
.core-repos
.repos
.stacks
.ssl
.komodo

8
.vscode/extensions.json vendored Normal file
View File

@@ -0,0 +1,8 @@
// Workspace extension recommendations.
// All of these are also installed in the devcontainer; listing them here
// gives the same language support to contributors not using the devcontainer.
{
  "recommendations": [
    "rust-lang.rust-analyzer",
    "tamasfe.even-better-toml",
    "vadimcn.vscode-lldb",
    "denoland.vscode-deno"
  ]
}

179
.vscode/tasks.json vendored Normal file
View File

@@ -0,0 +1,179 @@
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "Run Core",
      "command": "cargo",
      "args": ["run", "-p", "komodo_core", "--release"],
      "options": {
        "cwd": "${workspaceFolder}",
        "env": { "KOMODO_CONFIG_PATH": "test.core.config.toml" }
      },
      "problemMatcher": ["$rustc"]
    },
    {
      "label": "Build Core",
      "command": "cargo",
      "args": ["build", "-p", "komodo_core", "--release"],
      "options": {
        "cwd": "${workspaceFolder}",
        "env": { "KOMODO_CONFIG_PATH": "test.core.config.toml" }
      },
      "problemMatcher": ["$rustc"]
    },
    {
      "label": "Run Periphery",
      "command": "cargo",
      "args": ["run", "-p", "komodo_periphery", "--release"],
      "options": {
        "cwd": "${workspaceFolder}",
        "env": { "KOMODO_CONFIG_PATH": "test.periphery.config.toml" }
      },
      "problemMatcher": ["$rustc"]
    },
    {
      "label": "Build Periphery",
      "command": "cargo",
      "args": ["build", "-p", "komodo_periphery", "--release"],
      "options": {
        "cwd": "${workspaceFolder}",
        "env": { "KOMODO_CONFIG_PATH": "test.periphery.config.toml" }
      },
      "problemMatcher": ["$rustc"]
    },
    {
      "label": "Run Backend",
      "dependsOn": ["Run Core", "Run Periphery"],
      "problemMatcher": ["$rustc"]
    },
    {
      // Aggregate build of both backend binaries. This task was previously
      // missing, which broke "Init" (it depended on a nonexistent
      // "Build Backend" label).
      "label": "Build Backend",
      "dependsOn": ["Build Core", "Build Periphery"],
      "problemMatcher": ["$rustc"]
    },
    {
      "label": "Build TS Client Types",
      "type": "process",
      "command": "node",
      "args": ["./client/core/ts/generate_types.mjs"],
      "problemMatcher": []
    },
    {
      "label": "Init TS Client",
      "type": "shell",
      "command": "yarn && yarn build && yarn link",
      "options": {
        "cwd": "${workspaceFolder}/client/core/ts"
      },
      "problemMatcher": []
    },
    {
      "label": "Init Frontend Client",
      "type": "shell",
      "command": "yarn link komodo_client && yarn install",
      "options": {
        "cwd": "${workspaceFolder}/frontend"
      },
      "problemMatcher": []
    },
    {
      "label": "Init Frontend",
      "dependsOn": [
        "Build TS Client Types",
        "Init TS Client",
        "Init Frontend Client"
      ],
      "dependsOrder": "sequence",
      "problemMatcher": []
    },
    {
      "label": "Build Frontend",
      "type": "shell",
      "command": "yarn build",
      "options": {
        "cwd": "${workspaceFolder}/frontend"
      },
      "problemMatcher": []
    },
    {
      // Copy the generated TS client into the frontend's public dir
      // before running the dev server.
      "label": "Prepare Frontend For Run",
      "type": "shell",
      "command": "cp -r ./client/core/ts/dist/. frontend/public/client/.",
      "options": {
        "cwd": "${workspaceFolder}"
      },
      "dependsOn": ["Build TS Client Types", "Build Frontend"],
      "dependsOrder": "sequence",
      "problemMatcher": []
    },
    {
      "label": "Run Frontend",
      "type": "shell",
      "command": "yarn dev",
      "options": {
        "cwd": "${workspaceFolder}/frontend"
      },
      "dependsOn": ["Prepare Frontend For Run"],
      "problemMatcher": []
    },
    {
      "label": "Init",
      "dependsOn": ["Build Backend", "Init Frontend"],
      "dependsOrder": "sequence",
      "problemMatcher": []
    },
    {
      "label": "Run Komodo",
      "dependsOn": ["Run Core", "Run Periphery", "Run Frontend"],
      "problemMatcher": []
    }
  ]
}

487
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,15 @@
[workspace]
resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
members = [
"bin/*",
"lib/*",
"example/*",
"client/core/rs",
"client/periphery/rs",
]
[workspace.package]
version = "1.15.4"
version = "1.16.3"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
@@ -15,7 +21,7 @@ homepage = "https://komo.do"
[workspace.dependencies]
# LOCAL
# komodo_client = "1.14.3"
# komodo_client = "1.15.6"
komodo_client = { path = "client/core/rs" }
periphery_client = { path = "client/periphery/rs" }
environment_file = { path = "lib/environment_file" }
@@ -58,12 +64,12 @@ tokio-tungstenite = "0.24.0"
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.210", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.128"
serde_json = "1.0.132"
serde_yaml = "0.9.34"
toml = "0.8.19"
# ERROR
anyhow = "1.0.89"
anyhow = "1.0.90"
thiserror = "1.0.64"
# LOGGING
@@ -108,4 +114,4 @@ octorust = "0.7.0"
dashmap = "6.1.0"
colored = "2.1.0"
regex = "1.11.0"
bson = "2.13.0"
bson = "2.13.0"

View File

@@ -21,6 +21,9 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::None(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunAction(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunProcedure(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -129,9 +132,15 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CommitSync(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeployStackIfChanged(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartStack(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -162,6 +171,9 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
info!("Running Execution...");
let res = match execution {
Execution::RunAction(request) => {
komodo_client().execute(request).await
}
Execution::RunProcedure(request) => {
komodo_client().execute(request).await
}
@@ -270,9 +282,15 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::RunSync(request) => {
komodo_client().execute(request).await
}
Execution::CommitSync(request) => {
komodo_client().write(request).await
}
Execution::DeployStack(request) => {
komodo_client().execute(request).await
}
Execution::DeployStackIfChanged(request) => {
komodo_client().execute(request).await
}
Execution::StartStack(request) => {
komodo_client().execute(request).await
}

View File

@@ -19,6 +19,7 @@ komodo_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
environment_file.workspace = true
formatting.workspace = true
command.workspace = true
logger.workspace = true
git.workspace = true
# mogh

View File

@@ -4,7 +4,7 @@
## and may negatively affect runtime performance.
# Build Core
FROM rust:1.81.0-alpine AS core-builder
FROM rust:1.82.0-alpine AS core-builder
WORKDIR /builder
RUN apk update && apk --no-cache add musl-dev openssl-dev openssl-libs-static
COPY . .
@@ -16,14 +16,14 @@ WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM alpine:3.20
# Install Deps
RUN apk update && apk add --no-cache --virtual .build-deps \
openssl ca-certificates git git-lfs
openssl ca-certificates git git-lfs curl
# Setup an application directory
WORKDIR /app
@@ -32,6 +32,7 @@ WORKDIR /app
COPY ./config/core.config.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /app
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Hint at the port
EXPOSE 9120

View File

@@ -1,5 +1,5 @@
# Build Core
FROM rust:1.81.0-bullseye AS core-builder
FROM rust:1.82.0-bullseye AS core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p komodo_core --release
@@ -10,7 +10,7 @@ WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
RUN cd frontend && yarn link komodo_client && yarn && yarn build
# Final Image
FROM debian:bullseye-slim
@@ -27,6 +27,7 @@ WORKDIR /app
COPY ./config/core.config.toml /config/config.toml
COPY --from=core-builder /builder/target/release/core /app
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
# Hint at the port
EXPOSE 9120

View File

@@ -201,6 +201,9 @@ fn resource_link(
ResourceTargetVariant::Procedure => {
format!("/procedures/{id}")
}
ResourceTargetVariant::Action => {
format!("/actions/{id}")
}
ResourceTargetVariant::ServerTemplate => {
format!("/server-templates/{id}")
}

View File

@@ -0,0 +1,209 @@
use std::collections::HashSet;
use anyhow::Context;
use command::run_komodo_command;
use komodo_client::{
api::{
execute::RunAction,
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
},
entities::{
action::Action,
config::core::CoreConfig,
permission::PermissionLevel,
update::Update,
user::{action_user, User},
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
config::core_config,
helpers::{
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_string,
},
query::get_variables_and_secrets,
random_string,
update::update_update,
},
resource::{self, refresh_action_state_cache},
state::{action_states, db_client, State},
};
impl Resolve<RunAction, (User, Update)> for State {
  /// Execute an Action: wrap its file contents in the TS execution
  /// harness, write it to a temp file under the action directory,
  /// run it with deno, and record the (redacted) output on the Update.
  /// A short-lived API key is created for the run and deleted afterward.
  async fn resolve(
    &self,
    RunAction { action }: RunAction,
    (user, mut update): (User, Update),
  ) -> anyhow::Result<Update> {
    let mut action = resource::get_check_permissions::<Action>(
      &action,
      &user,
      PermissionLevel::Execute,
    )
    .await?;
    // get the action state for the action (or insert default).
    let action_state = action_states()
      .action
      .get_or_insert_default(&action.id)
      .await;
    // This will set action state back to default when dropped.
    // Will also check to ensure action not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.running = true)?;
    update_update(update.clone()).await?;
    // Temporary credentials for the action's Komodo client,
    // named after the update so the origin is traceable.
    let CreateApiKeyResponse { key, secret } = State
      .resolve(
        CreateApiKey {
          name: update.id.clone(),
          expires: 0,
        },
        action_user().to_owned(),
      )
      .await?;
    let contents = &mut action.config.file_contents;
    // Wrap the file contents in the execution context.
    *contents = full_contents(contents, &key, &secret);
    let replacers =
      interpolate(contents, &mut update, key.clone(), secret.clone())
        .await?
        .into_iter()
        .collect::<Vec<_>>();
    // Random file name avoids collisions between concurrent runs.
    let path = core_config()
      .action_directory
      .join(format!("{}.ts", random_string(10)));
    if let Some(parent) = path.parent() {
      let _ = fs::create_dir_all(parent).await;
    }
    fs::write(&path, contents).await.with_context(|| {
      // Fixed typo in error message ("Faild" -> "Failed").
      format!("Failed to write action file to {path:?}")
    })?;
    let mut res = run_komodo_command(
      // Keep this stage name as is, the UI will find the latest update log by matching the stage name
      "Execute Action",
      None,
      format!(
        "deno run --allow-read --allow-net --allow-import {}",
        path.display()
      ),
      false,
    )
    .await;
    // Redact BOTH the key and the secret from BOTH output streams.
    // Previously stdout was only scrubbed of the key and stderr only of
    // the secret, so either credential could leak into the stored logs.
    res.stdout = svi::replace_in_string(&res.stdout, &replacers)
      .replace(&key, "<ACTION_API_KEY>")
      .replace(&secret, "<ACTION_API_SECRET>");
    res.stderr = svi::replace_in_string(&res.stderr, &replacers)
      .replace(&key, "<ACTION_API_KEY>")
      .replace(&secret, "<ACTION_API_SECRET>");
    // Best effort cleanup of the temp file / temp credentials.
    if let Err(e) = fs::remove_file(path).await {
      warn!(
        "Failed to delete action file after action execution | {e:#}"
      );
    }
    if let Err(e) = State
      .resolve(DeleteApiKey { key }, action_user().to_owned())
      .await
    {
      warn!(
        "Failed to delete API key after action execution | {e:#}"
      );
    };
    update.logs.push(res);
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with update_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_action_state_cache().await;
    }
    update_update(update.clone()).await?;
    Ok(update)
  }
}
/// Interpolate global variables and secrets into the wrapped action file
/// `contents` in place, after registering the run's temporary
/// `ACTION_API_KEY` / `ACTION_API_SECRET` as additional secrets so
/// references to them in the action file also resolve.
///
/// Logs which variables / secrets were interpolated onto `update`, and
/// returns the secret replacers so the caller can redact secret values
/// from the command output before it is stored.
async fn interpolate(
  contents: &mut String,
  update: &mut Update,
  key: String,
  secret: String,
) -> anyhow::Result<HashSet<(String, String)>> {
  let mut vars_and_secrets = get_variables_and_secrets().await?;
  // Treat the temporary api credentials as secrets so their values are
  // tracked in `secret_replacers` (and thus redactable).
  vars_and_secrets
    .secrets
    .insert(String::from("ACTION_API_KEY"), key);
  vars_and_secrets
    .secrets
    .insert(String::from("ACTION_API_SECRET"), secret);
  let mut global_replacers = HashSet::new();
  let mut secret_replacers = HashSet::new();
  interpolate_variables_secrets_into_string(
    &vars_and_secrets,
    contents,
    &mut global_replacers,
    &mut secret_replacers,
  )?;
  // Record the interpolations on the update log for visibility.
  add_interp_update_log(update, &global_replacers, &secret_replacers);
  Ok(secret_replacers)
}
/// Wrap the user-provided action `contents` in a TypeScript execution
/// harness: imports the Komodo client lib served by this Core instance,
/// constructs a client authenticated with the temporary `key` / `secret`,
/// runs the contents inside `main()`, and reports success or failure
/// (exiting with code 1 on error).
fn full_contents(contents: &str, key: &str, secret: &str) -> String {
  let CoreConfig {
    port, ssl_enabled, ..
  } = core_config();
  // The action runs on the Core host itself, so the client targets
  // Core over localhost using Core's own port / protocol.
  let protocol = if *ssl_enabled { "https" } else { "http" };
  let base_url = format!("{protocol}://localhost:{port}");
  format!(
    "import {{ KomodoClient }} from '{base_url}/client/lib.js';
const komodo = KomodoClient('{base_url}', {{
type: 'api-key',
params: {{ key: '{key}', secret: '{secret}' }}
}});
async function main() {{{contents}}}
main().catch(error => {{
console.error('🚨 Action exited early with errors 🚨')
if (error.status !== undefined && error.result !== undefined) {{
console.error('Status:', error.status);
console.error(JSON.stringify(error.result, null, 2));
}} else {{
console.error(JSON.stringify(error, null, 2));
}}
Deno.exit(1)
}}).then(() => console.log('🦎 Action completed successfully 🦎'));"
  )
}

View File

@@ -24,6 +24,7 @@ use crate::{
state::{db_client, State},
};
mod action;
mod build;
mod deployment;
mod procedure;
@@ -76,6 +77,7 @@ pub enum ExecuteRequest {
// ==== STACK ====
DeployStack(DeployStack),
DeployStackIfChanged(DeployStackIfChanged),
StartStack(StartStack),
RestartStack(RestartStack),
StopStack(StopStack),
@@ -96,6 +98,9 @@ pub enum ExecuteRequest {
// ==== PROCEDURE ====
RunProcedure(RunProcedure),
// ==== ACTION ====
RunAction(RunAction),
// ==== SERVER TEMPLATE ====
LaunchServer(LaunchServer),

View File

@@ -3,7 +3,7 @@ use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use komodo_client::{
api::execute::*,
api::{execute::*, write::RefreshRepoCache},
entities::{
alert::{Alert, AlertData, SeverityLevel},
builder::{Builder, BuilderConfig},
@@ -123,6 +123,17 @@ impl Resolve<CloneRepo, (User, Update)> for State {
update_last_pulled_time(&repo.name).await;
}
if let Err(e) = State
.resolve(RefreshRepoCache { repo: repo.id }, user)
.await
.context("Failed to refresh repo cache")
{
update.push_error_log(
"Refresh Repo cache",
format_serror(&e.into()),
);
};
handle_server_update_return(update).await
}
}
@@ -207,6 +218,17 @@ impl Resolve<PullRepo, (User, Update)> for State {
update_last_pulled_time(&repo.name).await;
}
if let Err(e) = State
.resolve(RefreshRepoCache { repo: repo.id }, user)
.await
.context("Failed to refresh repo cache")
{
update.push_error_log(
"Refresh Repo cache",
format_serror(&e.into()),
);
};
handle_server_update_return(update).await
}
}

View File

@@ -425,7 +425,7 @@ impl Resolve<RestartAllContainers, (User, Update)> for State {
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::StartAllContainers {})
.request(api::container::RestartAllContainers {})
.await
.context("failed to restart all containers on host")?;
@@ -520,12 +520,12 @@ impl Resolve<UnpauseAllContainers, (User, Update)> for State {
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.starting_containers = true)?;
.update(|state| state.unpausing_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::StartAllContainers {})
.request(api::container::UnpauseAllContainers {})
.await
.context("failed to unpause all containers on host")?;

View File

@@ -3,9 +3,11 @@ use std::collections::HashSet;
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::execute::*,
api::{execute::*, write::RefreshStackCache},
entities::{
permission::PermissionLevel, stack::StackInfo, update::Update,
permission::PermissionLevel,
stack::{Stack, StackInfo},
update::Update,
user::User,
},
};
@@ -23,9 +25,10 @@ use crate::{
},
periphery_client,
query::get_variables_and_secrets,
update::update_update,
update::{add_update_without_send, update_update},
},
monitor::update_cache_for_server,
resource,
stack::{
execute::execute_compose, get_stack_and_server,
services::extract_services_into_res,
@@ -243,6 +246,77 @@ impl Resolve<DeployStack, (User, Update)> for State {
}
}
impl Resolve<DeployStackIfChanged, (User, Update)> for State {
async fn resolve(
&self,
DeployStackIfChanged { stack, stop_time }: DeployStackIfChanged,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let stack = resource::get_check_permissions::<Stack>(
&stack,
&user,
PermissionLevel::Execute,
)
.await?;
State
.resolve(
RefreshStackCache {
stack: stack.id.clone(),
},
user.clone(),
)
.await?;
let stack = resource::get::<Stack>(&stack.id).await?;
let changed = match (
&stack.info.deployed_contents,
&stack.info.remote_contents,
) {
(Some(deployed_contents), Some(latest_contents)) => {
let changed = || {
for latest in latest_contents {
let Some(deployed) = deployed_contents
.iter()
.find(|c| c.path == latest.path)
else {
return true;
};
if latest.contents != deployed.contents {
return true;
}
}
false
};
changed()
}
(None, _) => true,
_ => false,
};
if !changed {
update.push_simple_log(
"Diff compose files",
String::from("Deploy cancelled after no changes detected."),
);
update.finalize();
return Ok(update);
}
// Don't actually send it here, let the handler send it after it can set action state.
// This is usually done in crate::helpers::update::init_execution_update.
update.id = add_update_without_send(&update).await?;
State
.resolve(
DeployStack {
stack: stack.name,
stop_time,
},
(user, update),
)
.await
}
}
impl Resolve<StartStack, (User, Update)> for State {
#[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(

View File

@@ -1,4 +1,4 @@
use std::collections::HashMap;
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
@@ -6,6 +6,7 @@ use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self,
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
@@ -20,23 +21,27 @@ use komodo_client::{
sync::ResourceSync,
update::{Log, Update},
user::{sync_user, User},
ResourceTargetVariant,
},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{oid::ObjectId, to_document},
};
use resolver_api::Resolve;
use crate::{
helpers::{query::get_id_to_tags, update::update_update},
resource::{self, refresh_resource_sync_state_cache},
state::{db_client, State},
state::{action_states, db_client, State},
sync::{
deploy::{
build_deploy_cache, deploy_from_cache, SyncDeployParams,
},
execute::{get_updates_for_execution, ExecuteResourceSync},
remote::RemoteResources,
AllResourcesById,
AllResourcesById, ResourceSyncTrait,
},
};
@@ -44,7 +49,11 @@ impl Resolve<RunSync, (User, Update)> for State {
#[instrument(name = "RunSync", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RunSync { sync }: RunSync,
RunSync {
sync,
resource_type: match_resource_type,
resources: match_resources,
}: RunSync,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let sync = resource::get_check_permissions::<
@@ -52,6 +61,17 @@ impl Resolve<RunSync, (User, Update)> for State {
>(&sync, &user, PermissionLevel::Execute)
.await?;
// get the action state for the sync (or insert default).
let action_state = action_states()
.resource_sync
.get_or_insert_default(&sync.id)
.await;
// This will set action state back to default when dropped.
// Will also check to ensure sync not already busy before updating.
let _action_guard =
action_state.update(|state| state.syncing = true)?;
// Send update here for FE to recheck action state
update_update(update.clone()).await?;
@@ -70,22 +90,105 @@ impl Resolve<RunSync, (User, Update)> for State {
update_update(update.clone()).await?;
if !file_errors.is_empty() {
return Err(anyhow!("Found file errors. Cannot execute sync."))
return Err(anyhow!("Found file errors. Cannot execute sync."));
}
let resources = resources?;
let id_to_tags = get_id_to_tags(None).await?;
let all_resources = AllResourcesById::load().await?;
// Convert all match_resources to names
let match_resources = match_resources.map(|resources| {
resources
.into_iter()
.filter_map(|name_or_id| {
let Some(resource_type) = match_resource_type else {
return Some(name_or_id);
};
match ObjectId::from_str(&name_or_id) {
Ok(_) => match resource_type {
ResourceTargetVariant::Alerter => all_resources
.alerters
.get(&name_or_id)
.map(|a| a.name.clone()),
ResourceTargetVariant::Build => all_resources
.builds
.get(&name_or_id)
.map(|b| b.name.clone()),
ResourceTargetVariant::Builder => all_resources
.builders
.get(&name_or_id)
.map(|b| b.name.clone()),
ResourceTargetVariant::Deployment => all_resources
.deployments
.get(&name_or_id)
.map(|d| d.name.clone()),
ResourceTargetVariant::Procedure => all_resources
.procedures
.get(&name_or_id)
.map(|p| p.name.clone()),
ResourceTargetVariant::Action => all_resources
.actions
.get(&name_or_id)
.map(|p| p.name.clone()),
ResourceTargetVariant::Repo => all_resources
.repos
.get(&name_or_id)
.map(|r| r.name.clone()),
ResourceTargetVariant::Server => all_resources
.servers
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::ServerTemplate => all_resources
.templates
.get(&name_or_id)
.map(|t| t.name.clone()),
ResourceTargetVariant::Stack => all_resources
.stacks
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::ResourceSync => all_resources
.syncs
.get(&name_or_id)
.map(|s| s.name.clone()),
ResourceTargetVariant::System => None,
},
Err(_) => Some(name_or_id),
}
})
.collect::<Vec<_>>()
});
let deployments_by_name = all_resources
.deployments
.values()
.filter(|deployment| {
Deployment::include_resource(
&deployment.name,
&deployment.config,
match_resource_type,
match_resources.as_deref(),
&deployment.tags,
&id_to_tags,
&sync.config.match_tags,
)
})
.map(|deployment| (deployment.name.clone(), deployment.clone()))
.collect::<HashMap<_, _>>();
let stacks_by_name = all_resources
.stacks
.values()
.filter(|stack| {
Stack::include_resource(
&stack.name,
&stack.config,
match_resource_type,
match_resources.as_deref(),
&stack.tags,
&id_to_tags,
&sync.config.match_tags,
)
})
.map(|stack| (stack.name.clone(), stack.clone()))
.collect::<HashMap<_, _>>();
@@ -105,6 +208,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.servers,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -117,6 +222,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.deployments,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -126,6 +233,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.stacks,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -135,6 +244,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.builds,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -144,6 +255,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.repos,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -156,15 +269,30 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.procedures,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (actions_to_create, actions_to_update, actions_to_delete) =
get_updates_for_execution::<Action>(
resources.actions,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (builders_to_create, builders_to_update, builders_to_delete) =
get_updates_for_execution::<Builder>(
resources.builders,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -174,6 +302,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.alerters,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -186,6 +316,8 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.server_templates,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
@@ -198,31 +330,48 @@ impl Resolve<RunSync, (User, Update)> for State {
resources.resource_syncs,
delete,
&all_resources,
match_resource_type,
match_resources.as_deref(),
&id_to_tags,
&sync.config.match_tags,
)
.await?;
let (
variables_to_create,
variables_to_update,
variables_to_delete,
) = crate::sync::variables::get_updates_for_execution(
resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
)
.await?;
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.match_tags.is_empty()
{
crate::sync::variables::get_updates_for_execution(
resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
)
.await?
} else {
Default::default()
};
let (
user_groups_to_create,
user_groups_to_update,
user_groups_to_delete,
) = crate::sync::user_groups::get_updates_for_execution(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
&all_resources,
)
.await?;
) = if match_resource_type.is_none()
&& match_resources.is_none()
&& sync.config.match_tags.is_empty()
{
crate::sync::user_groups::get_updates_for_execution(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
&all_resources,
)
.await?
} else {
Default::default()
};
if deploy_cache.is_empty()
&& resource_syncs_to_create.is_empty()
@@ -255,6 +404,9 @@ impl Resolve<RunSync, (User, Update)> for State {
&& procedures_to_create.is_empty()
&& procedures_to_update.is_empty()
&& procedures_to_delete.is_empty()
&& actions_to_create.is_empty()
&& actions_to_update.is_empty()
&& actions_to_delete.is_empty()
&& user_groups_to_create.is_empty()
&& user_groups_to_update.is_empty()
&& user_groups_to_delete.is_empty()
@@ -331,6 +483,15 @@ impl Resolve<RunSync, (User, Update)> for State {
)
.await,
);
maybe_extend(
&mut update.logs,
Action::execute_sync_updates(
actions_to_create,
actions_to_update,
actions_to_delete,
)
.await,
);
// Dependent on server
maybe_extend(

View File

@@ -0,0 +1,132 @@
use anyhow::Context;
use komodo_client::{
api::read::*,
entities::{
action::{
Action, ActionActionState, ActionListItem, ActionState,
},
permission::PermissionLevel,
user::User,
},
};
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{action_state_cache, action_states, State},
};
impl Resolve<GetAction, User> for State {
  /// Fetch a single Action by name or id, verifying the user
  /// holds at least Read permission on it.
  async fn resolve(
    &self,
    GetAction { action }: GetAction,
    user: User,
  ) -> anyhow::Result<Action> {
    let action = resource::get_check_permissions::<Action>(
      &action,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    Ok(action)
  }
}
impl Resolve<ListActions, User> for State {
  /// List Actions visible to the user, filtered by the query.
  /// The global tag list is only loaded when the query actually
  /// filters by tags, to avoid an unnecessary db round trip.
  async fn resolve(
    &self,
    ListActions { query }: ListActions,
    user: User,
  ) -> anyhow::Result<Vec<ActionListItem>> {
    let all_tags = match query.tags.is_empty() {
      true => Vec::new(),
      false => get_all_tags(None).await?,
    };
    resource::list_for_user::<Action>(query, &user, &all_tags).await
  }
}
impl Resolve<ListFullActions, User> for State {
  /// List full Action documents visible to the user.
  /// Tags are only fetched when the query filters on them.
  async fn resolve(
    &self,
    ListFullActions { query }: ListFullActions,
    user: User,
  ) -> anyhow::Result<ListFullActionsResponse> {
    let all_tags = if query.tags.is_empty() {
      Vec::new()
    } else {
      get_all_tags(None).await?
    };
    resource::list_full_for_user::<Action>(query, &user, &all_tags)
      .await
  }
}
impl Resolve<GetActionActionState, User> for State {
  /// Return the live, in-memory action state (e.g. whether the
  /// Action is currently running) for the given Action.
  async fn resolve(
    &self,
    GetActionActionState { action }: GetActionActionState,
    user: User,
  ) -> anyhow::Result<ActionActionState> {
    // Resolve + permission check in one step: user needs Read.
    let action = resource::get_check_permissions::<Action>(
      &action,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let state = action_states()
      .action
      .get(&action.id)
      .await
      // Absent from the state map means "no action in progress".
      .unwrap_or_default()
      .get()?;
    Ok(state)
  }
}
impl Resolve<GetActionsSummary, User> for State {
  /// Summarize all Actions visible to the user: total count plus
  /// per-status counts (running / ok / failed / unknown).
  async fn resolve(
    &self,
    GetActionsSummary {}: GetActionsSummary,
    user: User,
  ) -> anyhow::Result<GetActionsSummaryResponse> {
    let actions = resource::list_full_for_user::<Action>(
      Default::default(),
      &user,
      &[],
    )
    .await
    .context("failed to get actions from db")?;

    let mut res = GetActionsSummaryResponse::default();
    let cache = action_state_cache();
    let action_states = action_states();

    for action in actions {
      res.total += 1;
      let cached = cache.get(&action.id).await.unwrap_or_default();
      let live = action_states
        .action
        .get(&action.id)
        .await
        .unwrap_or_default()
        .get()?;
      // A live "running" state always wins over the cached status.
      if live.running {
        res.running += 1;
      } else {
        match cached {
          ActionState::Ok => res.ok += 1,
          ActionState::Failed => res.failed += 1,
          ActionState::Unknown => res.unknown += 1,
          // The cache never stores Running: that status only ever
          // comes from the live action states checked above.
          ActionState::Running => unreachable!(),
        }
      }
    }

    Ok(res)
  }
}

View File

@@ -3,7 +3,10 @@ use komodo_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},
entities::{deployment::Deployment, server::Server, user::User},
entities::{
deployment::Deployment, server::Server, stack::Stack,
sync::ResourceSync, user::User,
},
};
use mungos::{
by_id::find_one_by_id,
@@ -30,12 +33,18 @@ impl Resolve<ListAlerts, User> for State {
if !user.admin && !core_config().transparent_mode {
let server_ids =
get_resource_ids_for_user::<Server>(&user).await?;
let stack_ids =
get_resource_ids_for_user::<Stack>(&user).await?;
let deployment_ids =
get_resource_ids_for_user::<Deployment>(&user).await?;
let sync_ids =
get_resource_ids_for_user::<ResourceSync>(&user).await?;
query.extend(doc! {
"$or": [
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
{ "target.type": "Stack", "target.id": { "$in": &stack_ids } },
{ "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
{ "target.type": "ResourceSync", "target.id": { "$in": &sync_ids } },
]
});
}

View File

@@ -12,6 +12,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{db_client, State},
};
@@ -37,7 +38,12 @@ impl Resolve<ListAlerters, User> for State {
ListAlerters { query }: ListAlerters,
user: User,
) -> anyhow::Result<Vec<AlerterListItem>> {
resource::list_for_user::<Alerter>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Alerter>(query, &user, &all_tags).await
}
}
@@ -47,7 +53,13 @@ impl Resolve<ListFullAlerters, User> for State {
ListFullAlerters { query }: ListFullAlerters,
user: User,
) -> anyhow::Result<ListFullAlertersResponse> {
resource::list_full_for_user::<Alerter>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Alerter>(query, &user, &all_tags)
.await
}
}
@@ -57,15 +69,16 @@ impl Resolve<GetAlertersSummary, User> for State {
GetAlertersSummary {}: GetAlertersSummary,
user: User,
) -> anyhow::Result<GetAlertersSummaryResponse> {
let query =
match resource::get_resource_ids_for_user::<Alerter>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
let query = match resource::get_resource_object_ids_for_user::<
Alerter,
>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
let total = db_client()
.alerters
.count_documents(query)

View File

@@ -22,6 +22,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{
action_states, build_state_cache, db_client, github_client, State,
@@ -49,7 +50,12 @@ impl Resolve<ListBuilds, User> for State {
ListBuilds { query }: ListBuilds,
user: User,
) -> anyhow::Result<Vec<BuildListItem>> {
resource::list_for_user::<Build>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Build>(query, &user, &all_tags).await
}
}
@@ -59,7 +65,13 @@ impl Resolve<ListFullBuilds, User> for State {
ListFullBuilds { query }: ListFullBuilds,
user: User,
) -> anyhow::Result<ListFullBuildsResponse> {
resource::list_full_for_user::<Build>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Build>(query, &user, &all_tags)
.await
}
}
@@ -94,6 +106,7 @@ impl Resolve<GetBuildsSummary, User> for State {
let builds = resource::list_full_for_user::<Build>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get all builds")?;
@@ -252,9 +265,15 @@ impl Resolve<ListCommonBuildExtraArgs, User> for State {
ListCommonBuildExtraArgs { query }: ListCommonBuildExtraArgs,
user: User,
) -> anyhow::Result<ListCommonBuildExtraArgsResponse> {
let builds = resource::list_full_for_user::<Build>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let builds =
resource::list_full_for_user::<Build>(query, &user, &all_tags)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();

View File

@@ -12,6 +12,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{db_client, State},
};
@@ -37,7 +38,12 @@ impl Resolve<ListBuilders, User> for State {
ListBuilders { query }: ListBuilders,
user: User,
) -> anyhow::Result<Vec<BuilderListItem>> {
resource::list_for_user::<Builder>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Builder>(query, &user, &all_tags).await
}
}
@@ -47,7 +53,13 @@ impl Resolve<ListFullBuilders, User> for State {
ListFullBuilders { query }: ListFullBuilders,
user: User,
) -> anyhow::Result<ListFullBuildersResponse> {
resource::list_full_for_user::<Builder>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Builder>(query, &user, &all_tags)
.await
}
}
@@ -57,15 +69,16 @@ impl Resolve<GetBuildersSummary, User> for State {
GetBuildersSummary {}: GetBuildersSummary,
user: User,
) -> anyhow::Result<GetBuildersSummaryResponse> {
let query =
match resource::get_resource_ids_for_user::<Builder>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
let query = match resource::get_resource_object_ids_for_user::<
Builder,
>(&user)
.await?
{
Some(ids) => doc! {
"_id": { "$in": ids }
},
None => Document::new(),
};
let total = db_client()
.builders
.count_documents(query)

View File

@@ -19,7 +19,7 @@ use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::periphery_client,
helpers::{periphery_client, query::get_all_tags},
resource,
state::{action_states, deployment_status_cache, State},
};
@@ -45,7 +45,13 @@ impl Resolve<ListDeployments, User> for State {
ListDeployments { query }: ListDeployments,
user: User,
) -> anyhow::Result<Vec<DeploymentListItem>> {
resource::list_for_user::<Deployment>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Deployment>(query, &user, &all_tags)
.await
}
}
@@ -55,7 +61,15 @@ impl Resolve<ListFullDeployments, User> for State {
ListFullDeployments { query }: ListFullDeployments,
user: User,
) -> anyhow::Result<ListFullDeploymentsResponse> {
resource::list_full_for_user::<Deployment>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Deployment>(
query, &user, &all_tags,
)
.await
}
}
@@ -217,6 +231,7 @@ impl Resolve<GetDeploymentsSummary, User> for State {
let deployments = resource::list_full_for_user::<Deployment>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get deployments from db")?;
@@ -254,10 +269,16 @@ impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
ListCommonDeploymentExtraArgs { query }: ListCommonDeploymentExtraArgs,
user: User,
) -> anyhow::Result<ListCommonDeploymentExtraArgsResponse> {
let deployments =
resource::list_full_for_user::<Deployment>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let deployments = resource::list_full_for_user::<Deployment>(
query, &user, &all_tags,
)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();

View File

@@ -29,6 +29,7 @@ use crate::{
resource, state::State,
};
mod action;
mod alert;
mod alerter;
mod build;
@@ -88,6 +89,13 @@ enum ReadRequest {
ListProcedures(ListProcedures),
ListFullProcedures(ListFullProcedures),
// ==== ACTION ====
GetActionsSummary(GetActionsSummary),
GetAction(GetAction),
GetActionActionState(GetActionActionState),
ListActions(ListActions),
ListFullActions(ListFullActions),
// ==== SERVER TEMPLATE ====
GetServerTemplate(GetServerTemplate),
GetServerTemplatesSummary(GetServerTemplatesSummary),
@@ -111,6 +119,7 @@ enum ReadRequest {
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
ListAllDockerContainers(ListAllDockerContainers),
#[to_string_resolver]
ListDockerContainers(ListDockerContainers),
#[to_string_resolver]
@@ -402,12 +411,18 @@ impl Resolve<ListGitProvidersFromConfig, User> for State {
let (builds, repos, syncs) = tokio::try_join!(
resource::list_full_for_user::<Build>(
Default::default(),
&user
&user,
&[]
),
resource::list_full_for_user::<Repo>(
Default::default(),
&user,
&[]
),
resource::list_full_for_user::<Repo>(Default::default(), &user),
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user
&user,
&[]
),
)?;

View File

@@ -10,6 +10,7 @@ use komodo_client::{
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{action_states, procedure_state_cache, State},
};
@@ -35,7 +36,13 @@ impl Resolve<ListProcedures, User> for State {
ListProcedures { query }: ListProcedures,
user: User,
) -> anyhow::Result<ListProceduresResponse> {
resource::list_for_user::<Procedure>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Procedure>(query, &user, &all_tags)
.await
}
}
@@ -45,7 +52,13 @@ impl Resolve<ListFullProcedures, User> for State {
ListFullProcedures { query }: ListFullProcedures,
user: User,
) -> anyhow::Result<ListFullProceduresResponse> {
resource::list_full_for_user::<Procedure>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Procedure>(query, &user, &all_tags)
.await
}
}
@@ -58,6 +71,7 @@ impl Resolve<GetProceduresSummary, User> for State {
let procedures = resource::list_full_for_user::<Procedure>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get procedures from db")?;

View File

@@ -12,6 +12,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{action_states, github_client, repo_state_cache, State},
};
@@ -37,7 +38,12 @@ impl Resolve<ListRepos, User> for State {
ListRepos { query }: ListRepos,
user: User,
) -> anyhow::Result<Vec<RepoListItem>> {
resource::list_for_user::<Repo>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Repo>(query, &user, &all_tags).await
}
}
@@ -47,7 +53,13 @@ impl Resolve<ListFullRepos, User> for State {
ListFullRepos { query }: ListFullRepos,
user: User,
) -> anyhow::Result<ListFullReposResponse> {
resource::list_full_for_user::<Repo>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Repo>(query, &user, &all_tags)
.await
}
}
@@ -79,10 +91,13 @@ impl Resolve<GetReposSummary, User> for State {
GetReposSummary {}: GetReposSummary,
user: User,
) -> anyhow::Result<GetReposSummaryResponse> {
let repos =
resource::list_full_for_user::<Repo>(Default::default(), &user)
.await
.context("failed to get repos from db")?;
let repos = resource::list_full_for_user::<Repo>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get repos from db")?;
let mut res = GetReposSummaryResponse::default();

View File

@@ -13,7 +13,7 @@ use komodo_client::{
entities::{
deployment::Deployment,
docker::{
container::Container,
container::{Container, ContainerListItem},
image::{Image, ImageHistoryResponseItem},
network::Network,
volume::Volume,
@@ -43,7 +43,7 @@ use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
helpers::periphery_client,
helpers::{periphery_client, query::get_all_tags},
resource,
stack::compose_container_match_regex,
state::{action_states, db_client, server_status_cache, State},
@@ -55,9 +55,12 @@ impl Resolve<GetServersSummary, User> for State {
GetServersSummary {}: GetServersSummary,
user: User,
) -> anyhow::Result<GetServersSummaryResponse> {
let servers =
resource::list_for_user::<Server>(Default::default(), &user)
.await?;
let servers = resource::list_for_user::<Server>(
Default::default(),
&user,
&[],
)
.await?;
let mut res = GetServersSummaryResponse::default();
for server in servers {
res.total += 1;
@@ -119,7 +122,12 @@ impl Resolve<ListServers, User> for State {
ListServers { query }: ListServers,
user: User,
) -> anyhow::Result<Vec<ServerListItem>> {
resource::list_for_user::<Server>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Server>(query, &user, &all_tags).await
}
}
@@ -129,7 +137,13 @@ impl Resolve<ListFullServers, User> for State {
ListFullServers { query }: ListFullServers,
user: User,
) -> anyhow::Result<ListFullServersResponse> {
resource::list_full_for_user::<Server>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Server>(query, &user, &all_tags)
.await
}
}
@@ -289,7 +303,7 @@ impl ResolveToString<ListSystemProcesses, User> for State {
}
}
const STATS_PER_PAGE: i64 = 500;
const STATS_PER_PAGE: i64 = 200;
impl Resolve<GetHistoricalServerStats, User> for State {
async fn resolve(
@@ -368,6 +382,40 @@ impl ResolveToString<ListDockerContainers, User> for State {
}
}
impl Resolve<ListAllDockerContainers, User> for State {
  /// Gather the cached container lists across every server the
  /// user can see. When `servers` is non-empty, it acts as a
  /// filter matching either server id or server name.
  async fn resolve(
    &self,
    ListAllDockerContainers { servers }: ListAllDockerContainers,
    user: User,
  ) -> anyhow::Result<Vec<ContainerListItem>> {
    let visible = resource::list_for_user::<Server>(
      Default::default(),
      &user,
      &[],
    )
    .await?;
    let mut containers = Vec::<ContainerListItem>::new();
    for server in visible {
      // Empty filter means "all servers"; otherwise the server
      // must match by id or by name.
      let included = servers.is_empty()
        || servers.contains(&server.id)
        || servers.contains(&server.name);
      if !included {
        continue;
      }
      let status = server_status_cache()
        .get_or_insert_default(&server.id)
        .await;
      // Containers may be absent if the server was never polled.
      if let Some(cached) = &status.containers {
        containers.extend(cached.iter().cloned());
      }
    }
    Ok(containers)
  }
}
impl Resolve<InspectDockerContainer, User> for State {
async fn resolve(
&self,

View File

@@ -11,6 +11,7 @@ use mungos::mongodb::bson::doc;
use resolver_api::Resolve;
use crate::{
helpers::query::get_all_tags,
resource,
state::{db_client, State},
};
@@ -36,7 +37,13 @@ impl Resolve<ListServerTemplates, User> for State {
ListServerTemplates { query }: ListServerTemplates,
user: User,
) -> anyhow::Result<ListServerTemplatesResponse> {
resource::list_for_user::<ServerTemplate>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<ServerTemplate>(query, &user, &all_tags)
.await
}
}
@@ -46,7 +53,15 @@ impl Resolve<ListFullServerTemplates, User> for State {
ListFullServerTemplates { query }: ListFullServerTemplates,
user: User,
) -> anyhow::Result<ListFullServerTemplatesResponse> {
resource::list_full_for_user::<ServerTemplate>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<ServerTemplate>(
query, &user, &all_tags,
)
.await
}
}
@@ -56,7 +71,7 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
user: User,
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
let query = match resource::get_resource_ids_for_user::<
let query = match resource::get_resource_object_ids_for_user::<
ServerTemplate,
>(&user)
.await?

View File

@@ -17,7 +17,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::periphery_client,
helpers::{periphery_client, query::get_all_tags},
resource,
stack::get_stack_and_server,
state::{action_states, github_client, stack_status_cache, State},
@@ -133,9 +133,15 @@ impl Resolve<ListCommonStackExtraArgs, User> for State {
ListCommonStackExtraArgs { query }: ListCommonStackExtraArgs,
user: User,
) -> anyhow::Result<ListCommonStackExtraArgsResponse> {
let stacks = resource::list_full_for_user::<Stack>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let stacks =
resource::list_full_for_user::<Stack>(query, &user, &all_tags)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
@@ -158,9 +164,15 @@ impl Resolve<ListCommonStackBuildExtraArgs, User> for State {
ListCommonStackBuildExtraArgs { query }: ListCommonStackBuildExtraArgs,
user: User,
) -> anyhow::Result<ListCommonStackBuildExtraArgsResponse> {
let stacks = resource::list_full_for_user::<Stack>(query, &user)
.await
.context("failed to get resources matching query")?;
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
let stacks =
resource::list_full_for_user::<Stack>(query, &user, &all_tags)
.await
.context("failed to get resources matching query")?;
// first collect with guaranteed uniqueness
let mut res = HashSet::<String>::new();
@@ -183,7 +195,12 @@ impl Resolve<ListStacks, User> for State {
ListStacks { query }: ListStacks,
user: User,
) -> anyhow::Result<Vec<StackListItem>> {
resource::list_for_user::<Stack>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<Stack>(query, &user, &all_tags).await
}
}
@@ -193,7 +210,13 @@ impl Resolve<ListFullStacks, User> for State {
ListFullStacks { query }: ListFullStacks,
user: User,
) -> anyhow::Result<ListFullStacksResponse> {
resource::list_full_for_user::<Stack>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<Stack>(query, &user, &all_tags)
.await
}
}
@@ -228,6 +251,7 @@ impl Resolve<GetStacksSummary, User> for State {
let stacks = resource::list_full_for_user::<Stack>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get stacks from db")?;

View File

@@ -15,6 +15,7 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::query::get_all_tags,
resource,
state::{
action_states, github_client, resource_sync_state_cache, State,
@@ -42,7 +43,13 @@ impl Resolve<ListResourceSyncs, User> for State {
ListResourceSyncs { query }: ListResourceSyncs,
user: User,
) -> anyhow::Result<Vec<ResourceSyncListItem>> {
resource::list_for_user::<ResourceSync>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_for_user::<ResourceSync>(query, &user, &all_tags)
.await
}
}
@@ -52,7 +59,15 @@ impl Resolve<ListFullResourceSyncs, User> for State {
ListFullResourceSyncs { query }: ListFullResourceSyncs,
user: User,
) -> anyhow::Result<ListFullResourceSyncsResponse> {
resource::list_full_for_user::<ResourceSync>(query, &user).await
let all_tags = if query.tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
resource::list_full_for_user::<ResourceSync>(
query, &user, &all_tags,
)
.await
}
}
@@ -88,6 +103,7 @@ impl Resolve<GetResourceSyncsSummary, User> for State {
resource::list_full_for_user::<ResourceSync>(
Default::default(),
&user,
&[],
)
.await
.context("failed to get resource_syncs from db")?;

View File

@@ -1,27 +1,16 @@
use std::collections::HashMap;
use anyhow::Context;
use komodo_client::{
api::read::{
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
ExportResourcesToToml, ExportResourcesToTomlResponse,
GetUserGroup, ListUserTargetPermissions,
ListUserGroups,
},
entities::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
permission::{PermissionLevel, UserTarget},
procedure::Procedure,
repo::Repo,
resource::ResourceQuery,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
toml::{PermissionToml, ResourcesToml, UserGroupToml},
user::User,
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, resource::ResourceQuery,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, toml::ResourcesToml, user::User,
ResourceTarget,
},
};
@@ -29,11 +18,14 @@ use mungos::find::find_collect;
use resolver_api::Resolve;
use crate::{
helpers::query::{get_id_to_tags, get_user_user_group_ids},
helpers::query::{
get_all_tags, get_id_to_tags, get_user_user_group_ids,
},
resource,
state::{db_client, State},
sync::{
toml::{convert_resource, ToToml, TOML_PRETTY_OPTIONS},
user_groups::convert_user_groups,
AllResourcesById,
},
};
@@ -46,10 +38,17 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
) -> anyhow::Result<ExportAllResourcesToTomlResponse> {
let mut targets = Vec::<ResourceTarget>::new();
let all_tags = if tags.is_empty() {
vec![]
} else {
get_all_tags(None).await?
};
targets.extend(
resource::list_for_user::<Alerter>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -59,6 +58,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Builder>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -68,6 +68,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Server>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -77,6 +78,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Deployment>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -86,6 +88,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Stack>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -95,6 +98,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Build>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -104,6 +108,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Repo>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -113,15 +118,27 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_for_user::<Procedure>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Procedure(resource.id)),
);
targets.extend(
resource::list_for_user::<Action>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
.map(|resource| ResourceTarget::Action(resource.id)),
);
targets.extend(
resource::list_for_user::<ServerTemplate>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -131,6 +148,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
resource::list_full_for_user::<ResourceSync>(
ResourceQuery::builder().tags(tags.clone()).build(),
&user,
&all_tags,
)
.await?
.into_iter()
@@ -184,9 +202,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
PermissionLevel::Read,
)
.await?;
res
.alerters
.push(convert_resource::<Alerter>(alerter, &id_to_tags))
res.alerters.push(convert_resource::<Alerter>(
alerter,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::ResourceSync(id) => {
let sync = resource::get_check_permissions::<ResourceSync>(
@@ -201,6 +222,8 @@ impl Resolve<ExportResourcesToToml, User> for State {
{
res.resource_syncs.push(convert_resource::<ResourceSync>(
sync,
false,
vec![],
&id_to_tags,
))
}
@@ -213,7 +236,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
res.server_templates.push(
convert_resource::<ServerTemplate>(template, &id_to_tags),
convert_resource::<ServerTemplate>(
template,
false,
vec![],
&id_to_tags,
),
)
}
ResourceTarget::Server(id) => {
@@ -223,9 +251,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
PermissionLevel::Read,
)
.await?;
res
.servers
.push(convert_resource::<Server>(server, &id_to_tags))
res.servers.push(convert_resource::<Server>(
server,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Builder(id) => {
let mut builder =
@@ -236,9 +267,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Builder::replace_ids(&mut builder, &all);
res
.builders
.push(convert_resource::<Builder>(builder, &id_to_tags))
res.builders.push(convert_resource::<Builder>(
builder,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Build(id) => {
let mut build = resource::get_check_permissions::<Build>(
@@ -248,9 +282,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Build::replace_ids(&mut build, &all);
res
.builds
.push(convert_resource::<Build>(build, &id_to_tags))
res.builds.push(convert_resource::<Build>(
build,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Deployment(id) => {
let mut deployment = resource::get_check_permissions::<
@@ -262,6 +299,8 @@ impl Resolve<ExportResourcesToToml, User> for State {
Deployment::replace_ids(&mut deployment, &all);
res.deployments.push(convert_resource::<Deployment>(
deployment,
false,
vec![],
&id_to_tags,
))
}
@@ -273,7 +312,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Repo::replace_ids(&mut repo, &all);
res.repos.push(convert_resource::<Repo>(repo, &id_to_tags))
res.repos.push(convert_resource::<Repo>(
repo,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Stack(id) => {
let mut stack = resource::get_check_permissions::<Stack>(
@@ -283,9 +327,12 @@ impl Resolve<ExportResourcesToToml, User> for State {
)
.await?;
Stack::replace_ids(&mut stack, &all);
res
.stacks
.push(convert_resource::<Stack>(stack, &id_to_tags))
res.stacks.push(convert_resource::<Stack>(
stack,
false,
vec![],
&id_to_tags,
))
}
ResourceTarget::Procedure(id) => {
let mut procedure = resource::get_check_permissions::<
@@ -297,6 +344,23 @@ impl Resolve<ExportResourcesToToml, User> for State {
Procedure::replace_ids(&mut procedure, &all);
res.procedures.push(convert_resource::<Procedure>(
procedure,
false,
vec![],
&id_to_tags,
));
}
ResourceTarget::Action(id) => {
let mut action = resource::get_check_permissions::<Action>(
&id,
&user,
PermissionLevel::Read,
)
.await?;
Action::replace_ids(&mut action, &all);
res.actions.push(convert_resource::<Action>(
action,
false,
vec![],
&id_to_tags,
));
}
@@ -336,122 +400,17 @@ async fn add_user_groups(
all: &AllResourcesById,
user: &User,
) -> anyhow::Result<()> {
let db = db_client();
let usernames = find_collect(&db.users, None, None)
let user_groups = State
.resolve(ListUserGroups {}, user.clone())
.await?
.into_iter()
.map(|user| (user.id, user.username))
.collect::<HashMap<_, _>>();
for user_group in user_groups {
let ug = State
.resolve(GetUserGroup { user_group }, user.clone())
.await?;
// this method is admin only, but we already know user can see user group if above does not return Err
let permissions = State
.resolve(
ListUserTargetPermissions {
user_target: UserTarget::UserGroup(ug.id),
},
User {
admin: true,
..Default::default()
},
)
.await?
.into_iter()
.map(|mut permission| {
match &mut permission.resource_target {
ResourceTarget::Build(id) => {
*id = all
.builds
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = all
.builders
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = all
.deployments
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = all
.servers
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = all
.repos
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = all
.alerters
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = all
.procedures
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = all
.templates
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = all
.syncs
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::Stack(id) => {
*id = all
.stacks
.get(id)
.map(|r| r.name.clone())
.unwrap_or_default()
}
ResourceTarget::System(_) => {}
}
PermissionToml {
target: permission.resource_target,
level: permission.level,
}
})
.collect();
res.user_groups.push(UserGroupToml {
name: ug.name,
users: ug
.users
.into_iter()
.filter_map(|user_id| usernames.get(&user_id).cloned())
.collect(),
all: ug.all,
permissions,
.filter(|ug| {
user_groups.contains(&ug.name) || user_groups.contains(&ug.id)
});
}
let mut ug = Vec::with_capacity(user_groups.size_hint().0);
convert_user_groups(user_groups, all, &mut ug).await?;
res.user_groups = ug.into_iter().map(|ug| ug.1).collect();
Ok(())
}
@@ -508,6 +467,14 @@ fn serialize_resources_toml(
Procedure::push_to_toml_string(procedure, &mut toml)?;
}
for action in resources.actions {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");
}
toml.push_str("[[action]]\n");
Action::push_to_toml_string(action, &mut toml)?;
}
for alerter in resources.alerters {
if !toml.is_empty() {
toml.push_str("\n\n##\n\n");

View File

@@ -4,6 +4,7 @@ use anyhow::{anyhow, Context};
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
@@ -104,6 +105,16 @@ impl Resolve<ListUpdates, User> for State {
})
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
let action_query =
resource::get_resource_ids_for_user::<Action>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "Action", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "Action" });
let builder_query =
resource::get_resource_ids_for_user::<Builder>(&user)
.await?
@@ -124,27 +135,27 @@ impl Resolve<ListUpdates, User> for State {
})
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
let server_template_query = resource::get_resource_ids_for_user::<ServerTemplate>(
&user,
)
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let server_template_query =
resource::get_resource_ids_for_user::<ServerTemplate>(&user)
.await?
.map(|ids| {
doc! {
"target.type": "ServerTemplate", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
let resource_sync_query = resource::get_resource_ids_for_user::<ResourceSync>(
&user,
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let resource_sync_query =
resource::get_resource_ids_for_user::<ResourceSync>(
&user,
)
.await?
.map(|ids| {
doc! {
"target.type": "ResourceSync", "target.id": { "$in": ids }
}
})
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
let mut query = query.unwrap_or_default();
query.extend(doc! {
@@ -155,6 +166,7 @@ impl Resolve<ListUpdates, User> for State {
build_query,
repo_query,
procedure_query,
action_query,
alerter_query,
builder_query,
server_template_query,
@@ -292,6 +304,14 @@ impl Resolve<GetUpdate, User> for State {
)
.await?;
}
ResourceTarget::Action(id) => {
resource::get_check_permissions::<Action>(
id,
&user,
PermissionLevel::Read,
)
.await?;
}
ResourceTarget::ServerTemplate(id) => {
resource::get_check_permissions::<ServerTemplate>(
id,

View File

@@ -6,7 +6,7 @@ use komodo_client::{
ListApiKeysForServiceUserResponse, ListApiKeysResponse,
ListUsers, ListUsersResponse,
},
entities::user::{User, UserConfig},
entities::user::{admin_service_user, User, UserConfig},
};
use mungos::{
by_id::find_one_by_id,
@@ -26,6 +26,13 @@ impl Resolve<GetUsername, User> for State {
GetUsername { user_id }: GetUsername,
_: User,
) -> anyhow::Result<GetUsernameResponse> {
if let Some(user) = admin_service_user(&user_id) {
return Ok(GetUsernameResponse {
username: user.username,
avatar: None,
});
}
let user = find_one_by_id(&db_client().users, &user_id)
.await
.context("failed at mongo query for user")?

View File

@@ -0,0 +1,71 @@
use komodo_client::{
api::write::*,
entities::{
action::Action, permission::PermissionLevel, update::Update,
user::User,
},
};
use resolver_api::Resolve;
use crate::{resource, state::State};
/// Create a new Action resource on behalf of the requesting user.
impl Resolve<CreateAction, User> for State {
  #[instrument(name = "CreateAction", skip(self, user))]
  async fn resolve(
    &self,
    CreateAction { name, config }: CreateAction,
    user: User,
  ) -> anyhow::Result<Action> {
    // The shared creation helper handles validation and persistence.
    let created =
      resource::create::<Action>(&name, config, &user).await?;
    Ok(created)
  }
}
/// Copy an existing Action's config into a newly created Action
/// under a different name.
impl Resolve<CopyAction, User> for State {
  #[instrument(name = "CopyAction", skip(self, user))]
  async fn resolve(
    &self,
    CopyAction { name, id }: CopyAction,
    user: User,
  ) -> anyhow::Result<Action> {
    // Write permission on the source Action is required to copy it.
    let source = resource::get_check_permissions::<Action>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Create the copy under the new name with the same configuration.
    resource::create::<Action>(&name, source.config.into(), &user)
      .await
  }
}
/// Apply a partial config update to an existing Action.
impl Resolve<UpdateAction, User> for State {
  #[instrument(name = "UpdateAction", skip(self, user))]
  async fn resolve(
    &self,
    UpdateAction { id, config }: UpdateAction,
    user: User,
  ) -> anyhow::Result<Action> {
    // Permission checking and persistence are handled by the
    // generic resource update helper.
    let updated =
      resource::update::<Action>(&id, config, &user).await?;
    Ok(updated)
  }
}
/// Rename an existing Action, returning the resulting Update record.
impl Resolve<RenameAction, User> for State {
  #[instrument(name = "RenameAction", skip(self, user))]
  async fn resolve(
    &self,
    RenameAction { id, name }: RenameAction,
    user: User,
  ) -> anyhow::Result<Update> {
    // Delegates to the shared rename helper used by the other
    // simple (no on-host state) resource types.
    Ok(resource::rename::<Action>(&id, &name, &user).await?)
  }
}
/// Delete an Action and return the deleted resource.
impl Resolve<DeleteAction, User> for State {
  #[instrument(name = "DeleteAction", skip(self, user))]
  async fn resolve(
    &self,
    DeleteAction { id }: DeleteAction,
    user: User,
  ) -> anyhow::Result<Action> {
    let deleted = resource::delete::<Action>(&id, &user).await?;
    Ok(deleted)
  }
}

View File

@@ -1,9 +1,8 @@
use komodo_client::{
api::write::{
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
},
api::write::*,
entities::{
alerter::Alerter, permission::PermissionLevel, user::User,
alerter::Alerter, permission::PermissionLevel, update::Update,
user::User,
},
};
use resolver_api::Resolve;
@@ -59,3 +58,14 @@ impl Resolve<UpdateAlerter, User> for State {
resource::update::<Alerter>(&id, config, &user).await
}
}
/// Rename an existing Alerter, returning the resulting Update record.
impl Resolve<RenameAlerter, User> for State {
  #[instrument(name = "RenameAlerter", skip(self, user))]
  async fn resolve(
    &self,
    RenameAlerter { id, name }: RenameAlerter,
    user: User,
  ) -> anyhow::Result<Update> {
    // Shared rename helper performs the permission check and db write.
    let update =
      resource::rename::<Alerter>(&id, &name, &user).await?;
    Ok(update)
  }
}

View File

@@ -6,6 +6,7 @@ use komodo_client::{
build::{Build, BuildInfo, PartialBuildConfig},
config::core::CoreConfig,
permission::PermissionLevel,
update::Update,
user::User,
CloneArgs, NoData,
},
@@ -77,6 +78,17 @@ impl Resolve<UpdateBuild, User> for State {
}
}
/// Rename an existing Build, returning the resulting Update record.
impl Resolve<RenameBuild, User> for State {
  #[instrument(name = "RenameBuild", skip(self, user))]
  async fn resolve(
    &self,
    RenameBuild { id, name }: RenameBuild,
    user: User,
  ) -> anyhow::Result<Update> {
    // Builds carry no server-side directory, so the generic
    // db-only rename helper is sufficient.
    Ok(resource::rename::<Build>(&id, &name, &user).await?)
  }
}
impl Resolve<RefreshBuildCache, User> for State {
#[instrument(
name = "RefreshBuildCache",

View File

@@ -1,7 +1,8 @@
use komodo_client::{
api::write::*,
entities::{
builder::Builder, permission::PermissionLevel, user::User,
builder::Builder, permission::PermissionLevel, update::Update,
user::User,
},
};
use resolver_api::Resolve;
@@ -57,3 +58,14 @@ impl Resolve<UpdateBuilder, User> for State {
resource::update::<Builder>(&id, config, &user).await
}
}
/// Rename an existing Builder, returning the resulting Update record.
impl Resolve<RenameBuilder, User> for State {
  #[instrument(name = "RenameBuilder", skip(self, user))]
  async fn resolve(
    &self,
    RenameBuilder { id, name }: RenameBuilder,
    user: User,
  ) -> anyhow::Result<Update> {
    let update =
      resource::rename::<Builder>(&id, &name, &user).await?;
    Ok(update)
  }
}

View File

@@ -108,7 +108,7 @@ impl Resolve<RenameDeployment, User> for State {
if container_state == DeploymentState::Unknown {
return Err(anyhow!(
"cannot rename deployment when container status is unknown"
"Cannot rename Deployment when container status is unknown"
));
}
@@ -124,7 +124,7 @@ impl Resolve<RenameDeployment, User> for State {
None,
)
.await
.context("failed to update deployment name on db")?;
.context("Failed to update Deployment name on db")?;
if container_state != DeploymentState::NotDeployed {
let server =
@@ -135,20 +135,19 @@ impl Resolve<RenameDeployment, User> for State {
new_name: name.clone(),
})
.await
.context("failed to rename container on server")?;
.context("Failed to rename container on server")?;
update.logs.push(log);
}
update.push_simple_log(
"rename deployment",
"Rename Deployment",
format!(
"renamed deployment from {} to {}",
"Renamed Deployment from {} to {}",
deployment.name, name
),
);
update.finalize();
add_update(update.clone()).await?;
update.id = add_update(update.clone()).await?;
Ok(update)
}

View File

@@ -2,7 +2,7 @@ use anyhow::anyhow;
use komodo_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
alerter::Alerter, build::Build, builder::Builder,
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, user::User, ResourceTarget,
@@ -84,6 +84,14 @@ impl Resolve<UpdateDescription, User> for State {
)
.await?;
}
ResourceTarget::Action(id) => {
resource::update_description::<Action>(
&id,
&description,
&user,
)
.await?;
}
ResourceTarget::ServerTemplate(id) => {
resource::update_description::<ServerTemplate>(
&id,

View File

@@ -13,6 +13,7 @@ use uuid::Uuid;
use crate::{auth::auth_request, state::State};
mod action;
mod alerter;
mod build;
mod builder;
@@ -28,6 +29,7 @@ mod service_user;
mod stack;
mod sync;
mod tag;
mod user;
mod user_group;
mod variable;
@@ -40,6 +42,11 @@ mod variable;
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
pub enum WriteRequest {
// ==== USER ====
UpdateUserUsername(UpdateUserUsername),
UpdateUserPassword(UpdateUserPassword),
DeleteUser(DeleteUser),
// ==== SERVICE USER ====
CreateServiceUser(CreateServiceUser),
UpdateServiceUserDescription(UpdateServiceUserDescription),
@@ -82,6 +89,7 @@ pub enum WriteRequest {
CopyBuild(CopyBuild),
DeleteBuild(DeleteBuild),
UpdateBuild(UpdateBuild),
RenameBuild(RenameBuild),
RefreshBuildCache(RefreshBuildCache),
CreateBuildWebhook(CreateBuildWebhook),
DeleteBuildWebhook(DeleteBuildWebhook),
@@ -91,18 +99,21 @@ pub enum WriteRequest {
CopyBuilder(CopyBuilder),
DeleteBuilder(DeleteBuilder),
UpdateBuilder(UpdateBuilder),
RenameBuilder(RenameBuilder),
// ==== SERVER TEMPLATE ====
CreateServerTemplate(CreateServerTemplate),
CopyServerTemplate(CopyServerTemplate),
DeleteServerTemplate(DeleteServerTemplate),
UpdateServerTemplate(UpdateServerTemplate),
RenameServerTemplate(RenameServerTemplate),
// ==== REPO ====
CreateRepo(CreateRepo),
CopyRepo(CopyRepo),
DeleteRepo(DeleteRepo),
UpdateRepo(UpdateRepo),
RenameRepo(RenameRepo),
RefreshRepoCache(RefreshRepoCache),
CreateRepoWebhook(CreateRepoWebhook),
DeleteRepoWebhook(DeleteRepoWebhook),
@@ -112,20 +123,31 @@ pub enum WriteRequest {
CopyAlerter(CopyAlerter),
DeleteAlerter(DeleteAlerter),
UpdateAlerter(UpdateAlerter),
RenameAlerter(RenameAlerter),
// ==== PROCEDURE ====
CreateProcedure(CreateProcedure),
CopyProcedure(CopyProcedure),
DeleteProcedure(DeleteProcedure),
UpdateProcedure(UpdateProcedure),
RenameProcedure(RenameProcedure),
// ==== ACTION ====
CreateAction(CreateAction),
CopyAction(CopyAction),
DeleteAction(DeleteAction),
UpdateAction(UpdateAction),
RenameAction(RenameAction),
// ==== SYNC ====
CreateResourceSync(CreateResourceSync),
CopyResourceSync(CopyResourceSync),
DeleteResourceSync(DeleteResourceSync),
UpdateResourceSync(UpdateResourceSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
RenameResourceSync(RenameResourceSync),
WriteSyncFileContents(WriteSyncFileContents),
CommitSync(CommitSync),
RefreshResourceSyncPending(RefreshResourceSyncPending),
CreateSyncWebhook(CreateSyncWebhook),
DeleteSyncWebhook(DeleteSyncWebhook),

View File

@@ -387,6 +387,20 @@ async fn extract_resource_target_with_validation(
.id;
Ok((ResourceTargetVariant::Procedure, id))
}
ResourceTarget::Action(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },
Err(_) => doc! { "name": ident },
};
let id = db_client()
.actions
.find_one(filter)
.await
.context("failed to query db for actions")?
.context("no matching action found")?
.id;
Ok((ResourceTargetVariant::Action, id))
}
ResourceTarget::ServerTemplate(ident) => {
let filter = match ObjectId::from_str(ident) {
Ok(id) => doc! { "_id": id },

View File

@@ -1,7 +1,8 @@
use komodo_client::{
api::write::*,
entities::{
permission::PermissionLevel, procedure::Procedure, user::User,
permission::PermissionLevel, procedure::Procedure,
update::Update, user::User,
},
};
use resolver_api::Resolve;
@@ -48,6 +49,17 @@ impl Resolve<UpdateProcedure, User> for State {
}
}
/// Rename an existing Procedure, returning the resulting Update record.
impl Resolve<RenameProcedure, User> for State {
  #[instrument(name = "RenameProcedure", skip(self, user))]
  async fn resolve(
    &self,
    RenameProcedure { id, name }: RenameProcedure,
    user: User,
  ) -> anyhow::Result<Update> {
    // Delegates to the shared rename helper (permission check + db write).
    Ok(resource::rename::<Procedure>(&id, &name, &user).await?)
  }
}
impl Resolve<DeleteProcedure, User> for State {
#[instrument(name = "DeleteProcedure", skip(self, user))]
async fn resolve(

View File

@@ -1,27 +1,36 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use git::GitRes;
use komodo_client::{
api::write::*,
entities::{
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
repo::{PartialRepoConfig, Repo, RepoInfo},
server::Server,
to_komodo_name,
update::{Log, Update},
user::User,
CloneArgs, NoData,
CloneArgs, NoData, Operation,
},
};
use mongo_indexed::doc;
use mungos::mongodb::bson::to_document;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::api;
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::git_token,
helpers::{
git_token, periphery_client,
update::{add_update, make_update},
},
resource,
state::{db_client, github_client, State},
state::{action_states, db_client, github_client, State},
};
impl Resolve<CreateRepo, User> for State {
@@ -75,6 +84,81 @@ impl Resolve<UpdateRepo, User> for State {
}
}
/// Rename a Repo. When the repo is cloned onto a server (server_id set,
/// default path), this also renames the cloned directory on the host
/// via the periphery agent; otherwise it is a plain db rename.
impl Resolve<RenameRepo, User> for State {
  #[instrument(name = "RenameRepo", skip(self, user))]
  async fn resolve(
    &self,
    RenameRepo { id, name }: RenameRepo,
    user: User,
  ) -> anyhow::Result<Update> {
    // Caller needs Write permission on the Repo.
    let repo = resource::get_check_permissions::<Repo>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // No attached server, or a custom clone path is configured:
    // nothing on a host to rename, so the generic db-only rename
    // helper handles everything.
    if repo.config.server_id.is_empty()
      || !repo.config.path.is_empty()
    {
      return resource::rename::<Repo>(&repo.id, &name, &user).await;
    }
    // get the action state for the repo (or insert default).
    let action_state =
      action_states().repo.get_or_insert_default(&repo.id).await;
    // Will check to ensure repo not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.renaming = true)?;
    // Normalize the requested name to komodo naming conventions.
    let name = to_komodo_name(&name);
    let mut update = make_update(&repo, Operation::RenameRepo, &user);
    // Persist the new name (and bump updated_at) in the db first.
    update_one_by_id(
      &db_client().repos,
      &repo.id,
      mungos::update::Update::Set(
        doc! { "name": &name, "updated_at": komodo_timestamp() },
      ),
      None,
    )
    .await
    .context("Failed to update Repo name on db")?;
    let server =
      resource::get::<Server>(&repo.config.server_id).await?;
    // Ask the periphery agent on the server to rename the cloned
    // directory. A failure here is recorded as an error log on the
    // Update rather than aborting, since the db rename already happened.
    let log = match periphery_client(&server)?
      .request(api::git::RenameRepo {
        curr_name: to_komodo_name(&repo.name),
        new_name: name.clone(),
      })
      .await
      .context("Failed to rename Repo directory on Server")
    {
      Ok(log) => log,
      Err(e) => Log::error(
        "Rename Repo directory failure",
        format_serror(&e.into()),
      ),
    };
    update.logs.push(log);
    update.push_simple_log(
      "Rename Repo",
      format!("Renamed Repo from {} to {}", repo.name, name),
    );
    update.finalize();
    // Persist the finalized update and record its id before returning.
    update.id = add_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<RefreshRepoCache, User> for State {
#[instrument(
name = "RefreshRepoCache",

View File

@@ -1,9 +1,7 @@
use anyhow::Context;
use formatting::format_serror;
use komodo_client::{
api::write::*,
entities::{
komodo_timestamp,
permission::PermissionLevel,
server::Server,
update::{Update, UpdateStatus},
@@ -11,7 +9,6 @@ use komodo_client::{
Operation,
},
};
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
use periphery_client::api;
use resolver_api::Resolve;
@@ -21,7 +18,7 @@ use crate::{
update::{add_update, make_update, update_update},
},
resource,
state::{db_client, State},
state::State,
};
impl Resolve<CreateServer, User> for State {
@@ -64,25 +61,7 @@ impl Resolve<RenameServer, User> for State {
RenameServer { id, name }: RenameServer,
user: User,
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&server, Operation::RenameServer, &user);
update_one_by_id(&db_client().servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None)
.await
.context("failed to update server on db. this name may already be taken.")?;
update.push_simple_log(
"rename server",
format!("renamed server {id} from {} to {name}", server.name),
);
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
resource::rename::<Server>(&id, &name, &user).await
}
}

View File

@@ -1,11 +1,11 @@
use komodo_client::{
api::write::{
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
UpdateServerTemplate,
RenameServerTemplate, UpdateServerTemplate,
},
entities::{
permission::PermissionLevel, server_template::ServerTemplate,
user::User,
update::Update, user::User,
},
};
use resolver_api::Resolve;
@@ -63,3 +63,14 @@ impl Resolve<UpdateServerTemplate, User> for State {
resource::update::<ServerTemplate>(&id, config, &user).await
}
}
/// Rename an existing ServerTemplate, returning the resulting Update.
impl Resolve<RenameServerTemplate, User> for State {
  #[instrument(name = "RenameServerTemplate", skip(self, user))]
  async fn resolve(
    &self,
    RenameServerTemplate { id, name }: RenameServerTemplate,
    user: User,
  ) -> anyhow::Result<Update> {
    let update =
      resource::rename::<ServerTemplate>(&id, &name, &user).await?;
    Ok(update)
  }
}

View File

@@ -4,7 +4,6 @@ use komodo_client::{
api::write::*,
entities::{
config::core::CoreConfig,
komodo_timestamp,
permission::PermissionLevel,
server::ServerState,
stack::{PartialStackConfig, Stack, StackInfo},
@@ -13,23 +12,20 @@ use komodo_client::{
FileContents, NoData, Operation,
},
};
use mungos::{
by_id::update_one_by_id,
mongodb::bson::{doc, to_document},
};
use mungos::mongodb::bson::{doc, to_document};
use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use periphery_client::api::compose::{
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
WriteComposeContentsToHost,
WriteCommitComposeContents, WriteComposeContentsToHost,
};
use resolver_api::Resolve;
use crate::{
config::core_config,
helpers::{
periphery_client,
git_token, periphery_client,
query::get_server_with_state,
update::{add_update, make_update},
},
@@ -100,36 +96,7 @@ impl Resolve<RenameStack, User> for State {
RenameStack { id, name }: RenameStack,
user: User,
) -> anyhow::Result<Update> {
let stack = resource::get_check_permissions::<Stack>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
let mut update =
make_update(&stack, Operation::RenameStack, &user);
update_one_by_id(
&db_client().stacks,
&stack.id,
mungos::update::Update::Set(
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
)
.await
.context("failed to update stack name on db")?;
update.push_simple_log(
"rename stack",
format!("renamed stack from {} to {}", stack.name, name),
);
update.finalize();
add_update(update.clone()).await?;
Ok(update)
resource::rename::<Stack>(&id, &name, &user).await
}
}
@@ -143,7 +110,7 @@ impl Resolve<WriteStackFileContents, User> for State {
}: WriteStackFileContents,
user: User,
) -> anyhow::Result<Update> {
let (stack, server) = get_stack_and_server(
let (mut stack, server) = get_stack_and_server(
&stack,
&user,
PermissionLevel::Write,
@@ -151,9 +118,9 @@ impl Resolve<WriteStackFileContents, User> for State {
)
.await?;
if !stack.config.files_on_host {
if !stack.config.files_on_host && stack.config.repo.is_empty() {
return Err(anyhow!(
"Stack is not configured to use files on host, can't write file contents"
"Stack is not configured to use Files on Host or Git Repo, can't write file contents"
));
}
@@ -162,30 +129,72 @@ impl Resolve<WriteStackFileContents, User> for State {
update.push_simple_log("File contents to write", &contents);
match periphery_client(&server)?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update.push_error_log(
"Write file contents",
format_serror(&e.into()),
);
}
};
let stack_id = stack.id.clone();
if stack.config.files_on_host {
match periphery_client(&server)?
.request(WriteComposeContentsToHost {
name: stack.name,
run_directory: stack.config.run_directory,
file_path,
contents,
})
.await
.context("Failed to write contents to host")
{
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update.push_error_log(
"Write file contents",
format_serror(&e.into()),
);
}
};
} else {
let git_token = if !stack.config.git_account.is_empty() {
git_token(
&stack.config.git_provider,
&stack.config.git_account,
|https| stack.config.git_https = https,
)
.await
.with_context(|| {
format!(
"Failed to get git token. | {} | {}",
stack.config.git_account, stack.config.git_provider
)
})?
} else {
None
};
match periphery_client(&server)?
.request(WriteCommitComposeContents {
stack,
username: Some(user.username),
file_path,
contents,
git_token,
})
.await
.context("Failed to write contents to host")
{
Ok(res) => {
update.logs.extend(res.logs);
}
Err(e) => {
update.push_error_log(
"Write file contents",
format_serror(&e.into()),
);
}
};
}
if let Err(e) = State
.resolve(
RefreshStackCache { stack: stack.id },
RefreshStackCache { stack: stack_id },
stack_user().to_owned(),
)
.await
@@ -227,10 +236,11 @@ impl Resolve<RefreshStackCache, User> for State {
.await?;
let file_contents_empty = stack.config.file_contents.is_empty();
let repo_empty = stack.config.repo.is_empty();
if !stack.config.files_on_host
&& file_contents_empty
&& stack.config.repo.is_empty()
&& repo_empty
{
// Nothing to do without one of these
return Ok(NoData {});
@@ -297,7 +307,7 @@ impl Resolve<RefreshStackCache, User> for State {
(services, Some(contents), Some(errors), None, None)
}
}
} else if file_contents_empty {
} else if !repo_empty {
// ================
// REPO BASED STACK
// ================

View File

@@ -6,8 +6,10 @@ use komodo_client::{
api::{read::ExportAllResourcesToToml, write::*},
entities::{
self,
action::Action,
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
all_logs_success,
build::Build,
builder::Builder,
config::core::CoreConfig,
@@ -22,9 +24,10 @@ use komodo_client::{
sync::{
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
},
update::Log,
to_komodo_name,
update::{Log, Update},
user::{sync_user, User},
NoData, Operation, ResourceTarget,
CloneArgs, NoData, Operation, ResourceTarget,
},
};
use mungos::{
@@ -35,6 +38,7 @@ use octorust::types::{
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
};
use resolver_api::Resolve;
use tokio::fs;
use crate::{
alert::send_alerts,
@@ -103,6 +107,297 @@ impl Resolve<UpdateResourceSync, User> for State {
}
}
/// Rename an existing ResourceSync, returning the resulting Update.
impl Resolve<RenameResourceSync, User> for State {
  #[instrument(name = "RenameResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    RenameResourceSync { id, name }: RenameResourceSync,
    user: User,
  ) -> anyhow::Result<Update> {
    // Delegates to the shared rename helper (permission check + db write).
    Ok(resource::rename::<ResourceSync>(&id, &name, &user).await?)
  }
}
/// Write the given contents to a resource file of a "files on host"
/// or repo-based ResourceSync, then refresh the sync's pending state.
/// For repo-based syncs the written file is also committed via git.
impl Resolve<WriteSyncFileContents, User> for State {
  async fn resolve(
    &self,
    WriteSyncFileContents {
      sync,
      resource_path,
      file_path,
      contents,
    }: WriteSyncFileContents,
    user: User,
  ) -> anyhow::Result<Update> {
    // Caller needs Write permission on the sync.
    let sync = resource::get_check_permissions::<ResourceSync>(
      &sync,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Only valid for files-on-host or repo based syncs
    // (UI-defined syncs keep contents in the db, not on disk).
    if !sync.config.files_on_host && sync.config.repo.is_empty() {
      return Err(anyhow!(
        "This method is only for files on host, or repo based syncs."
      ));
    }
    let mut update =
      make_update(&sync, Operation::WriteSyncContents, &user);
    update.push_simple_log("File contents", &contents);
    // Root directory the file lives under: the configured sync
    // directory for files-on-host, or the unique clone path for repos.
    let root = if sync.config.files_on_host {
      core_config()
        .sync_directory
        .join(to_komodo_name(&sync.name))
    } else {
      let clone_args: CloneArgs = (&sync).into();
      clone_args.unique_path(&core_config().repo_directory)?
    };
    let file_path =
      file_path.parse::<PathBuf>().context("Invalid file path")?;
    let resource_path = resource_path
      .parse::<PathBuf>()
      .context("Invalid resource path")?;
    let full_path = root.join(&resource_path).join(&file_path);
    // Best-effort: ensure parent directories exist before writing.
    if let Some(parent) = full_path.parent() {
      let _ = fs::create_dir_all(parent).await;
    }
    // Record the write outcome in the update logs either way.
    if let Err(e) =
      fs::write(&full_path, &contents).await.with_context(|| {
        format!("Failed to write file contents to {full_path:?}")
      })
    {
      update.push_error_log("Write file", format_serror(&e.into()));
    } else {
      update.push_simple_log(
        "Write file",
        format!("File written to {full_path:?}"),
      );
    };
    // Bail early (but still persist the update) if the write failed.
    if !all_logs_success(&update.logs) {
      update.finalize();
      update.id = add_update(update.clone()).await?;
      return Ok(update);
    }
    // Files-on-host: no commit step, just refresh pending state.
    if sync.config.files_on_host {
      if let Err(e) = State
        .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
        .await
      {
        update
          .push_error_log("Refresh failed", format_serror(&e.into()));
      }
      update.finalize();
      update.id = add_update(update.clone()).await?;
      return Ok(update);
    }
    // Repo-based sync: commit the written file, attributed to the user.
    let commit_res = git::commit_file(
      &format!("{}: Commit Resource File", user.username),
      &root,
      &resource_path.join(&file_path),
    )
    .await;
    update.logs.extend(commit_res.logs);
    if let Err(e) = State
      .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
      .await
    {
      update
        .push_error_log("Refresh failed", format_serror(&e.into()));
    }
    update.finalize();
    update.id = add_update(update.clone()).await?;
    Ok(update)
  }
}
/// Export all resources (filtered by the sync's match tags) to TOML and
/// commit the result to the sync's backing store: a file on the host,
/// a file committed to the sync's git repo, or (for UI-defined syncs)
/// the `config.file_contents` field in the db. Finishes by refreshing
/// the sync's pending state.
impl Resolve<CommitSync, User> for State {
  #[instrument(name = "CommitSync", skip(self, user))]
  async fn resolve(
    &self,
    CommitSync { sync }: CommitSync,
    user: User,
  ) -> anyhow::Result<Update> {
    // Caller needs Write permission on the sync.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Write)
    .await?;
    let file_contents_empty = sync.config.file_contents_empty();
    // A "fresh" sync has no mode / contents configured at all.
    let fresh_sync = !sync.config.files_on_host
      && sync.config.repo.is_empty()
      && file_contents_empty;
    // Committing is only allowed for managed syncs (or fresh ones).
    if !sync.config.managed && !fresh_sync {
      return Err(anyhow!(
        "Cannot commit to sync. Enabled 'managed' mode."
      ));
    }
    // Get this here so it can fail before update created.
    let resource_path =
      if sync.config.files_on_host || !sync.config.repo.is_empty() {
        let resource_path = sync
          .config
          .resource_path
          .first()
          .context("Sync does not have resource path configured.")?
          .parse::<PathBuf>()
          .context("Invalid resource path")?;
        // The target resource file must be a .toml file.
        if resource_path
          .extension()
          .context("Resource path missing '.toml' extension")?
          != "toml"
        {
          return Err(anyhow!(
            "Resource path missing '.toml' extension"
          ));
        }
        Some(resource_path)
      } else {
        None
      };
    // Export all matching resources to TOML, as the sync user.
    let res = State
      .resolve(
        ExportAllResourcesToToml {
          tags: sync.config.match_tags.clone(),
        },
        sync_user().to_owned(),
      )
      .await?;
    let mut update = make_update(&sync, Operation::CommitSync, &user);
    // Persist the update up front so its id exists for later edits.
    update.id = add_update(update.clone()).await?;
    update.logs.push(Log::simple("Resources", res.toml.clone()));
    if sync.config.files_on_host {
      // =============
      // FILES ON HOST
      // =============
      let Some(resource_path) = resource_path else {
        // Resource path checked above for files_on_host mode.
        unreachable!()
      };
      let file_path = core_config()
        .sync_directory
        .join(to_komodo_name(&sync.name))
        .join(&resource_path);
      // Best-effort: ensure parent directories exist.
      if let Some(parent) = file_path.parent() {
        let _ = tokio::fs::create_dir_all(&parent).await;
      };
      if let Err(e) = tokio::fs::write(&file_path, &res.toml)
        .await
        .with_context(|| {
          format!("Failed to write resource file to {file_path:?}",)
        })
      {
        // NOTE(review): update.id was already set above, and this error
        // path calls add_update again rather than update_update --
        // confirm add_update upserts by id and does not insert a
        // duplicate update document.
        update.push_error_log(
          "Write resource file",
          format_serror(&e.into()),
        );
        update.finalize();
        add_update(update.clone()).await?;
        return Ok(update);
      } else {
        update.push_simple_log(
          "Write contents",
          format!("File contents written to {file_path:?}"),
        );
      }
    } else if !sync.config.repo.is_empty() {
      let Some(resource_path) = resource_path else {
        // Resource path checked above for repo mode.
        unreachable!()
      };
      // GIT REPO
      let args: CloneArgs = (&sync).into();
      let root = args.unique_path(&core_config().repo_directory)?;
      // Write the file into the clone and commit it in one step.
      match git::write_commit_file(
        "Commit Sync",
        &root,
        &resource_path,
        &res.toml,
      )
      .await
      {
        Ok(res) => update.logs.extend(res.logs),
        Err(e) => {
          update.push_error_log(
            "Write resource file",
            format_serror(&e.into()),
          );
          update.finalize();
          add_update(update.clone()).await?;
          return Ok(update);
        }
      }
      // ===========
      // UI DEFINED
    } else if let Err(e) = db_client()
      .resource_syncs
      .update_one(
        doc! { "name": &sync.name },
        doc! { "$set": { "config.file_contents": res.toml } },
      )
      .await
      .context("failed to update file_contents on db")
    {
      update.push_error_log(
        "Write resource to database",
        format_serror(&e.into()),
      );
      update.finalize();
      add_update(update.clone()).await?;
      return Ok(update);
    }
    // Refresh the sync's pending diff now that contents changed.
    if let Err(e) = State
      .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
      .await
    {
      update.push_error_log(
        "Refresh sync pending",
        format_serror(&(&e).into()),
      );
    };
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_resource_sync_state_cache().await;
    }
    update_update(update.clone()).await?;
    Ok(update)
  }
}
impl Resolve<RefreshResourceSyncPending, User> for State {
#[instrument(
name = "RefreshResourceSyncPending",
@@ -190,6 +485,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.servers,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -199,6 +496,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.stacks,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -208,6 +507,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.deployments,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -217,6 +518,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.builds,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -226,6 +529,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.repos,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -235,6 +540,19 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.procedures,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
)
.await?;
push_updates_for_view::<Action>(
resources.actions,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -244,6 +562,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.builders,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -253,6 +573,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.alerters,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -262,6 +584,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.server_templates,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -271,6 +595,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
resources.resource_syncs,
delete,
&all_resources,
None,
None,
&id_to_tags,
&sync.config.match_tags,
&mut diffs,
@@ -278,22 +604,26 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
.await?;
}
let variable_updates =
let variable_updates = if sync.config.match_tags.is_empty() {
crate::sync::variables::get_updates_for_view(
&resources.variables,
// Delete doesn't work with variables when match tags are set
sync.config.match_tags.is_empty() && delete,
delete,
)
.await?;
.await?
} else {
Default::default()
};
let user_group_updates =
let user_group_updates = if sync.config.match_tags.is_empty() {
crate::sync::user_groups::get_updates_for_view(
resources.user_groups,
// Delete doesn't work with user groups when match tags are set
sync.config.match_tags.is_empty() && delete,
delete,
&all_resources,
)
.await?;
.await?
} else {
Default::default()
};
anyhow::Ok((
diffs,
@@ -418,135 +748,6 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
}
}
/// Legacy `CommitSync` implementation: exports all resources
/// (optionally filtered by the sync's match tags) to TOML and writes
/// the result either to a file on the host or to the sync's
/// `file_contents` field in the database, then refreshes the sync's
/// pending diff. Returns the refreshed sync.
impl Resolve<CommitSync, User> for State {
  #[instrument(name = "CommitSync", skip(self, user))]
  async fn resolve(
    &self,
    CommitSync { sync }: CommitSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    // Caller needs Write permission on the target sync.
    let sync = resource::get_check_permissions::<
      entities::sync::ResourceSync,
    >(&sync, &user, PermissionLevel::Write)
    .await?;
    // A "fresh" sync (no file source configured yet) may always be
    // committed; otherwise 'managed' mode must be enabled.
    let fresh_sync = !sync.config.files_on_host
      && sync.config.file_contents.is_empty()
      && sync.config.repo.is_empty();
    if !sync.config.managed && !fresh_sync {
      return Err(anyhow!(
        "Cannot commit to sync. Enabled 'managed' mode."
      ));
    }
    // Serialize all matching resources to TOML.
    let res = State
      .resolve(
        ExportAllResourcesToToml {
          tags: sync.config.match_tags,
        },
        sync_user().to_owned(),
      )
      .await?;
    // Track the commit as an Update so progress / errors show in UI.
    let mut update = make_update(
      ResourceTarget::ResourceSync(sync.id),
      Operation::CommitSync,
      &user,
    );
    update.id = add_update(update.clone()).await?;
    if sync.config.files_on_host {
      // FILES-ON-HOST mode: write the TOML to the configured path.
      let path = sync
        .config
        .resource_path
        .parse::<PathBuf>()
        .context("Resource path is not valid file path")?;
      let extension = path
        .extension()
        .context("Resource path missing '.toml' extension")?;
      if extension != "toml" {
        // NOTE(review): this early return happens after the update
        // was already created, so that update is never finalized —
        // confirm whether the extension check should run before
        // add_update (the newer implementation does it earlier).
        return Err(anyhow!("Wrong file extension. Expected '.toml', got '.{extension:?}'"));
      }
      // Best-effort: create parent dirs; the write below surfaces
      // any real error.
      if let Some(parent) = path.parent() {
        let _ = tokio::fs::create_dir_all(&parent).await;
      };
      if let Err(e) =
        tokio::fs::write(&sync.config.resource_path, &res.toml)
          .await
          .with_context(|| {
            format!(
              "Failed to write resource file to {}",
              sync.config.resource_path
            )
          })
      {
        // Record the failure on the update and bail out, returning
        // the current sync state instead of an Err.
        update.push_error_log(
          "Write resource file",
          format_serror(&e.into()),
        );
        update.finalize();
        add_update(update).await?;
        return resource::get::<ResourceSync>(&sync.name).await;
      }
    } else if let Err(e) = db_client()
      // UI-DEFINED mode: store the TOML in the sync document itself.
      .resource_syncs
      .update_one(
        doc! { "name": &sync.name },
        doc! { "$set": { "config.file_contents": &res.toml } },
      )
      .await
      .context("failed to update file_contents on db")
    {
      update.push_error_log(
        "Write resource to database",
        format_serror(&e.into()),
      );
      update.finalize();
      add_update(update).await?;
      return resource::get::<ResourceSync>(&sync.name).await;
    }
    update
      .logs
      .push(Log::simple("Committed resources", res.toml));
    // Recompute the sync's pending diff against the new contents.
    // A failure here is logged on the update but still propagated.
    let res = match State
      .resolve(RefreshResourceSyncPending { sync: sync.name }, user)
      .await
    {
      Ok(sync) => Ok(sync),
      Err(e) => {
        update.push_error_log(
          "Refresh sync pending",
          format_serror(&(&e).into()),
        );
        Err(e)
      }
    };
    update.finalize();
    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_resource_sync_state_cache().await;
    }
    update_update(update).await?;
    res
  }
}
impl Resolve<CreateSyncWebhook, User> for State {
#[instrument(name = "CreateSyncWebhook", skip(self, user))]
async fn resolve(

View File

@@ -7,7 +7,7 @@ use komodo_client::{
UpdateTagsOnResourceResponse,
},
entities::{
alerter::Alerter, build::Build, builder::Builder,
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, stack::Stack,
@@ -182,6 +182,15 @@ impl Resolve<UpdateTagsOnResource, User> for State {
.await?;
resource::update_tags::<Procedure>(&id, tags, user).await?
}
ResourceTarget::Action(id) => {
resource::get_check_permissions::<Action>(
&id,
&user,
PermissionLevel::Write,
)
.await?;
resource::update_tags::<Action>(&id, tags, user).await?
}
ResourceTarget::ServerTemplate(id) => {
resource::get_check_permissions::<ServerTemplate>(
&id,

View File

@@ -0,0 +1,130 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use komodo_client::{
api::write::{
DeleteUser, DeleteUserResponse, UpdateUserPassword,
UpdateUserPasswordResponse, UpdateUserUsername,
UpdateUserUsernameResponse,
},
entities::{
user::{User, UserConfig},
NoData,
},
};
use mungos::mongodb::bson::{doc, oid::ObjectId};
use resolver_api::Resolve;
use crate::{
helpers::hash_password,
state::{db_client, State},
};
//
/// Renames the calling user. Fails when the requested username is
/// empty or already taken by another account.
impl Resolve<UpdateUserUsername, User> for State {
  async fn resolve(
    &self,
    UpdateUserUsername { username }: UpdateUserUsername,
    user: User,
  ) -> anyhow::Result<UpdateUserUsernameResponse> {
    if username.is_empty() {
      return Err(anyhow!("Username cannot be empty."));
    }
    let client = db_client();
    // Enforce username uniqueness before touching the record.
    let existing = client
      .users
      .find_one(doc! { "username": &username })
      .await
      .context("Failed to query for existing users")?;
    if existing.is_some() {
      return Err(anyhow!("Username already taken."));
    }
    let user_id = ObjectId::from_str(&user.id)
      .context("User id not valid ObjectId.")?;
    client
      .users
      .update_one(
        doc! { "_id": user_id },
        doc! { "$set": { "username": username } },
      )
      .await
      .context("Failed to update user username on database.")?;
    Ok(NoData {})
  }
}
//
/// Sets a new password for the calling user. Only valid for local
/// (username/password) accounts; the password is bcrypt-hashed
/// before being stored.
impl Resolve<UpdateUserPassword, User> for State {
  async fn resolve(
    &self,
    UpdateUserPassword { password }: UpdateUserPassword,
    user: User,
  ) -> anyhow::Result<UpdateUserPasswordResponse> {
    // Password auth only applies to local accounts.
    if !matches!(user.config, UserConfig::Local { .. }) {
      return Err(anyhow!("User is not local user"));
    }
    if password.is_empty() {
      return Err(anyhow!("Password cannot be empty."));
    }
    let user_id = ObjectId::from_str(&user.id)
      .context("User id not valid ObjectId.")?;
    // Never store the plaintext password.
    let hashed = hash_password(password)?;
    db_client()
      .users
      .update_one(
        doc! { "_id": user_id },
        doc! { "$set": {
          "config.data.password": hashed
        } },
      )
      .await
      .context("Failed to update user password on database.")?;
    Ok(NoData {})
  }
}
//
/// Deletes a user account. Only admins may call this; admins cannot
/// delete themselves, super admins can never be deleted, and only a
/// super admin may delete another admin. Returns the deleted user.
impl Resolve<DeleteUser, User> for State {
  async fn resolve(
    &self,
    DeleteUser { user }: DeleteUser,
    admin: User,
  ) -> anyhow::Result<DeleteUserResponse> {
    if !admin.admin {
      return Err(anyhow!("Calling user is not admin."));
    }
    // Self-deletion is blocked whether addressed by id or username.
    if admin.username == user || admin.id == user {
      return Err(anyhow!("User cannot delete themselves."));
    }
    // The target may be given as an ObjectId or as a username.
    let query = match ObjectId::from_str(&user) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "username": user },
    };
    let db = db_client();
    let found = db
      .users
      .find_one(query.clone())
      .await
      .context("Failed to query database for users.")?;
    let Some(user) = found else {
      return Err(anyhow!("No user found with given id / username"));
    };
    if user.super_admin {
      return Err(anyhow!("Cannot delete a super admin user."));
    }
    if user.admin && !admin.super_admin {
      return Err(anyhow!(
        "Only a Super Admin can delete an admin user."
      ));
    }
    db.users
      .delete_one(query)
      .await
      .context("Failed to delete user from database")?;
    Ok(user)
  }
}

View File

@@ -81,7 +81,7 @@ impl Resolve<UpdateVariableValue, User> for State {
let variable = get_variable(&name).await?;
if value == variable.value {
return Err(anyhow!("no change"));
return Ok(variable);
}
db_client()

View File

@@ -16,12 +16,10 @@ use resolver_api::Resolve;
use crate::{
config::core_config,
state::State,
state::{db_client, jwt_client},
helpers::hash_password,
state::{db_client, jwt_client, State},
};
const BCRYPT_COST: u32 = 10;
impl Resolve<CreateLocalUser, HeaderMap> for State {
#[instrument(name = "CreateLocalUser", skip(self))]
async fn resolve(
@@ -47,8 +45,7 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
return Err(anyhow!("Password cannot be empty string"));
}
let password = bcrypt::hash(password, BCRYPT_COST)
.context("failed to hash password")?;
let hashed_password = hash_password(password)?;
let no_users_exist =
db_client().users.find_one(Document::new()).await?.is_none();
@@ -71,7 +68,9 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
last_update_view: 0,
recents: Default::default(),
all: Default::default(),
config: UserConfig::Local { password },
config: UserConfig::Local {
password: hashed_password,
},
};
let user_id = db_client()

View File

@@ -144,9 +144,15 @@ pub fn core_config() -> &'static CoreConfig {
jwt_ttl: env
.komodo_jwt_ttl
.unwrap_or(config.jwt_ttl),
sync_directory: env
.komodo_sync_directory
.unwrap_or(config.sync_directory),
repo_directory: env
.komodo_repo_directory
.unwrap_or(config.repo_directory),
action_directory: env
.komodo_action_directory
.unwrap_or(config.action_directory),
resource_poll_interval: env
.komodo_resource_poll_interval
.unwrap_or(config.resource_poll_interval),

View File

@@ -1,4 +1,5 @@
use komodo_client::entities::{
action::Action,
alert::Alert,
alerter::Alerter,
api_key::ApiKey,
@@ -47,6 +48,7 @@ pub struct DbClient {
pub builders: Collection<Builder>,
pub repos: Collection<Repo>,
pub procedures: Collection<Procedure>,
pub actions: Collection<Action>,
pub alerters: Collection<Alerter>,
pub server_templates: Collection<ServerTemplate>,
pub resource_syncs: Collection<ResourceSync>,
@@ -115,6 +117,7 @@ impl DbClient {
repos: resource_collection(&db, "Repo").await?,
alerters: resource_collection(&db, "Alerter").await?,
procedures: resource_collection(&db, "Procedure").await?,
actions: resource_collection(&db, "Action").await?,
server_templates: resource_collection(&db, "ServerTemplate")
.await?,
resource_syncs: resource_collection(&db, "ResourceSync")

View File

@@ -4,7 +4,8 @@ use anyhow::anyhow;
use komodo_client::{
busy::Busy,
entities::{
build::BuildActionState, deployment::DeploymentActionState,
action::ActionActionState, build::BuildActionState,
deployment::DeploymentActionState,
procedure::ProcedureActionState, repo::RepoActionState,
server::ServerActionState, stack::StackActionState,
sync::ResourceSyncActionState,
@@ -22,6 +23,7 @@ pub struct ActionStates {
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
pub procedure:
Cache<String, Arc<ActionState<ProcedureActionState>>>,
pub action: Cache<String, Arc<ActionState<ActionActionState>>>,
pub resource_sync:
Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,

View File

@@ -1,4 +1,4 @@
use std::{str::FromStr, time::Duration};
use std::str::FromStr;
use anyhow::{anyhow, Context};
use futures::future::join_all;
@@ -54,10 +54,6 @@ pub fn empty_or_only_spaces(word: &str) -> bool {
true
}
pub fn random_duration(min_ms: u64, max_ms: u64) -> Duration {
Duration::from_millis(thread_rng().gen_range(min_ms..max_ms))
}
pub fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
@@ -66,6 +62,15 @@ pub fn random_string(length: usize) -> String {
.collect()
}
/// bcrypt work factor used for all locally stored passwords.
const BCRYPT_COST: u32 = 10;

/// Hashes a plaintext password with bcrypt at [`BCRYPT_COST`].
///
/// Accepts anything viewable as bytes (`String`, `&str`, `&[u8]`, ...).
pub fn hash_password<P>(password: P) -> anyhow::Result<String>
where
  P: AsRef<[u8]>,
{
  let hashed = bcrypt::hash(password, BCRYPT_COST)
    .context("failed to hash password")?;
  Ok(hashed)
}
/// First checks db for token, then checks core config.
/// Only errors if db call errors.
/// Returns (token, use_https)

View File

@@ -146,6 +146,22 @@ async fn execute_execution(
)
.await?
}
Execution::RunAction(req) => {
let req = ExecuteRequest::RunAction(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunAction(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("Failed at RunAction"),
&update_id,
)
.await?
}
Execution::RunBuild(req) => {
let req = ExecuteRequest::RunBuild(req);
let update = init_execution_update(&req, &user).await?;
@@ -706,6 +722,11 @@ async fn execute_execution(
)
.await?
}
// Exception: This is a write operation.
Execution::CommitSync(req) => State
.resolve(req, user)
.await
.context("Failed at CommitSync")?,
Execution::DeployStack(req) => {
let req = ExecuteRequest::DeployStack(req);
let update = init_execution_update(&req, &user).await?;
@@ -722,6 +743,22 @@ async fn execute_execution(
)
.await?
}
Execution::DeployStackIfChanged(req) => {
let req = ExecuteRequest::DeployStackIfChanged(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeployStackIfChanged(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("Failed at DeployStackIfChanged"),
&update_id,
)
.await?
}
Execution::StartStack(req) => {
let req = ExecuteRequest::StartStack(req);
let update = init_execution_update(&req, &user).await?;

View File

@@ -2,6 +2,7 @@ use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use komodo_client::entities::{
action::Action,
alerter::Alerter,
build::Build,
builder::Builder,
@@ -201,6 +202,14 @@ pub async fn get_tag_check_owner(
Err(anyhow!("user must be tag owner or admin"))
}
pub async fn get_all_tags(
filter: impl Into<Option<Document>>,
) -> anyhow::Result<Vec<Tag>> {
find_collect(&db_client().tags, filter, None)
.await
.context("failed to query db for tags")
}
pub async fn get_id_to_tags(
filter: impl Into<Option<Document>>,
) -> anyhow::Result<HashMap<String, Tag>> {
@@ -283,6 +292,9 @@ pub async fn get_user_permission_on_target(
ResourceTarget::Procedure(id) => {
get_user_permission_on_resource::<Procedure>(user, id).await
}
ResourceTarget::Action(id) => {
get_user_permission_on_resource::<Action>(user, id).await
}
ResourceTarget::ServerTemplate(id) => {
get_user_permission_on_resource::<ServerTemplate>(user, id)
.await

View File

@@ -1,5 +1,6 @@
use anyhow::Context;
use komodo_client::entities::{
action::Action,
build::Build,
deployment::Deployment,
komodo_timestamp,
@@ -345,6 +346,14 @@ pub async fn init_execution_update(
),
),
// Action
ExecuteRequest::RunAction(data) => (
Operation::RunAction,
ResourceTarget::Action(
resource::get::<Action>(&data.action).await?.id,
),
),
// Server template
ExecuteRequest::LaunchServer(data) => (
Operation::LaunchServer,
@@ -370,6 +379,12 @@ pub async fn init_execution_update(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::DeployStackIfChanged(data) => (
Operation::DeployStack,
ResourceTarget::Stack(
resource::get::<Stack>(&data.stack).await?.id,
),
),
ExecuteRequest::StartStack(data) => (
if data.service.is_some() {
Operation::StartStackService
@@ -429,7 +444,10 @@ pub async fn init_execution_update(
};
let mut update = make_update(target, operation, user);
update.in_progress();
// Don't actually send it here, let the handlers send it after they can set action state.
update.id = add_update_without_send(&update).await?;
// Hold off on even adding update for DeployStackIfChanged
if !matches!(&request, ExecuteRequest::DeployStackIfChanged(_)) {
// Don't actually send it here, let the handlers send it after they can set action state.
update.id = add_update_without_send(&update).await?;
}
Ok(update)
}

View File

@@ -1,56 +0,0 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use komodo_client::{
api::execute::RunBuild,
entities::{build::Build, user::git_webhook_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn build_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
/// Handles an incoming GitHub push webhook for a build: verifies the
/// signature, checks the webhook is enabled and the branch matches,
/// then triggers `RunBuild` as the git webhook user.
pub async fn handle_build_webhook(
  build_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let queue_lock =
    build_locks().get_or_insert_default(&build_id).await;
  let _guard = queue_lock.lock().await;
  let build = resource::get::<Build>(&build_id).await?;
  verify_gh_signature(headers, &body, &build.config.webhook_secret)
    .await?;
  if !build.config.webhook_enabled {
    return Err(anyhow!("build does not have webhook enabled"));
  }
  // Only act on pushes to the configured branch.
  if extract_branch(&body)? != build.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunBuild(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -1,263 +0,0 @@
use std::sync::Arc;
use anyhow::{anyhow, Context};
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use hex::ToHex;
use hmac::{Hmac, Mac};
use serde::Deserialize;
use sha2::Sha256;
use tokio::sync::Mutex;
use tracing::Instrument;
use crate::{
config::core_config,
helpers::{cache::Cache, random_duration},
};
mod build;
mod procedure;
mod repo;
mod stack;
mod sync;
/// HMAC-SHA256, the digest GitHub uses for `x-hub-signature-256`.
type HmacSha256 = Hmac<Sha256>;
/// Path params for webhook routes addressed by resource id only.
#[derive(Deserialize)]
struct Id {
  id: String,
}
/// Path params for webhook routes addressed by resource id plus an
/// optional target branch (defaulted at the call site).
#[derive(Deserialize)]
struct IdBranch {
  id: String,
  branch: Option<String>,
}
/// Builds the axum router for all GitHub-style webhook listener
/// endpoints. Every route immediately spawns its handler on a
/// background task (so the webhook sender gets a fast response) and
/// logs failures inside a tracing span carrying the resource id.
// NOTE(review): the warn! messages below say "webook" — typo left
// untouched here because log text is runtime behavior.
pub fn router() -> Router {
  Router::new()
    // Trigger RunBuild for the build with the given id.
    .route(
      "/build/:id",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("build_webhook", id);
            async {
              let res = build::handle_build_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run build webook for build {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      ),
    )
    // Trigger CloneRepo for the repo with the given id.
    .route(
      "/repo/:id/clone",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_clone_webhook", id);
            async {
              let res = repo::handle_repo_clone_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo clone webook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Trigger PullRepo for the repo with the given id.
    .route(
      "/repo/:id/pull",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_pull_webhook", id);
            async {
              let res = repo::handle_repo_pull_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo pull webook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Trigger BuildRepo for the repo with the given id.
    .route(
      "/repo/:id/build",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("repo_build_webhook", id);
            async {
              let res = repo::handle_repo_build_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run repo build webook for repo {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Refresh the stack cache for the stack with the given id.
    .route(
      "/stack/:id/refresh",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("stack_clone_webhook", id);
            async {
              let res = stack::handle_stack_refresh_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run stack clone webook for stack {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Trigger DeployStack for the stack with the given id.
    .route(
      "/stack/:id/deploy",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("stack_pull_webhook", id);
            async {
              let res = stack::handle_stack_deploy_webhook(id.clone(), headers, body).await;
              if let Err(e) = res {
                warn!("failed to run stack pull webook for stack {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Trigger RunProcedure; the branch path segment selects which
    // pushes fire it (defaults to "main" when omitted).
    .route(
      "/procedure/:id/:branch",
      post(
        |Path(IdBranch { id, branch }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("procedure_webhook", id, branch);
            async {
              let res = procedure::handle_procedure_webhook(
                id.clone(),
                branch.unwrap_or_else(|| String::from("main")),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run procedure webook for procedure {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Refresh pending diffs for the sync with the given id.
    .route(
      "/sync/:id/refresh",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("sync_refresh_webhook", id);
            async {
              let res = sync::handle_sync_refresh_webhook(
                id.clone(),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run sync webook for sync {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
    // Execute the sync with the given id.
    .route(
      "/sync/:id/sync",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          tokio::spawn(async move {
            let span = info_span!("sync_execute_webhook", id);
            async {
              let res = sync::handle_sync_execute_webhook(
                id.clone(),
                headers,
                body
              ).await;
              if let Err(e) = res {
                warn!("failed to run sync webook for sync {id} | {e:#}");
              }
            }
            .instrument(span)
            .await
          });
        },
      )
    )
}
/// Verifies the `x-hub-signature-256` header of a GitHub webhook
/// request against the HMAC-SHA256 of `body`.
///
/// Uses `custom_secret` when non-empty, falling back to the core
/// config's global `webhook_secret`.
///
/// # Errors
/// Fails when the signature header is missing, is not valid ASCII,
/// or does not match the computed digest.
#[instrument(skip_all)]
async fn verify_gh_signature(
  headers: HeaderMap,
  body: &str,
  custom_secret: &str,
) -> anyhow::Result<()> {
  // Wait a random amount of time to blunt timing analysis.
  tokio::time::sleep(random_duration(0, 500)).await;
  // Idiom fix: replace is_none()/unwrap() chains with `?` + context.
  let signature = headers
    .get("x-hub-signature-256")
    .context("no signature in headers")?
    .to_str()
    .context("failed to unwrap signature")?
    .replace("sha256=", "");
  let secret_bytes = if custom_secret.is_empty() {
    core_config().webhook_secret.as_bytes()
  } else {
    custom_secret.as_bytes()
  };
  let mut mac = HmacSha256::new_from_slice(secret_bytes)
    .expect("github webhook | failed to create hmac sha256");
  mac.update(body.as_bytes());
  let expected = mac.finalize().into_bytes().encode_hex::<String>();
  if signature == expected {
    Ok(())
  } else {
    Err(anyhow!("signature does not equal expected"))
  }
}
/// Minimal view of a GitHub push webhook payload: only the pushed
/// ref is needed to determine the branch.
#[derive(Deserialize)]
struct GithubWebhookBody {
  /// Full git ref of the push, e.g. `refs/heads/main`.
  #[serde(rename = "ref")]
  branch: String,
}
/// Parses a GitHub webhook payload and returns the pushed branch
/// name with the `refs/heads/` prefix removed.
fn extract_branch(body: &str) -> anyhow::Result<String> {
  let parsed: GithubWebhookBody = serde_json::from_str(body)
    .context("failed to parse github request body")?;
  Ok(parsed.branch.replace("refs/heads/", ""))
}
/// Maps a resource id to the mutex serializing its webhook handling.
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;

View File

@@ -1,64 +0,0 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use komodo_client::{
api::execute::RunProcedure,
entities::{procedure::Procedure, user::git_webhook_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
/// Lazily-initialized per-procedure lock cache, used to serialize
/// webhook handling for the same procedure.
fn procedure_locks() -> &'static ListenerLockCache {
  // Fix: the static was copy-pasted from the build webhook module
  // and mislabeled BUILD_LOCKS; renamed to match its purpose.
  static PROCEDURE_LOCKS: OnceLock<ListenerLockCache> =
    OnceLock::new();
  PROCEDURE_LOCKS.get_or_init(Default::default)
}
/// Handles an incoming GitHub push webhook for a procedure: verifies
/// the signature, checks the webhook is enabled and the pushed
/// branch matches `target_branch`, then triggers `RunProcedure` as
/// the git webhook user.
pub async fn handle_procedure_webhook(
  procedure_id: String,
  target_branch: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock =
    procedure_locks().get_or_insert_default(&procedure_id).await;
  let _lock = lock.lock().await;
  let procedure = resource::get::<Procedure>(&procedure_id).await?;
  // Authenticate the request before acting on it.
  verify_gh_signature(
    headers,
    &body,
    &procedure.config.webhook_secret,
  )
  .await?;
  if !procedure.config.webhook_enabled {
    return Err(anyhow!("procedure does not have webhook enabled"));
  }
  // Only respond to pushes on the branch this route targets.
  let request_branch = extract_branch(&body)?;
  if request_branch != target_branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req = ExecuteRequest::RunProcedure(RunProcedure {
    procedure: procedure_id,
  });
  // Create the tracking update before dispatching the execution.
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunProcedure(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -1,135 +0,0 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use komodo_client::{
api::execute::{BuildRepo, CloneRepo, PullRepo},
entities::{repo::Repo, user::git_webhook_user},
};
use resolver_api::Resolve;
use crate::{
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
fn repo_locks() -> &'static ListenerLockCache {
static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
REPO_LOCKS.get_or_init(Default::default)
}
/// Handles an incoming GitHub push webhook that triggers a repo
/// clone: verifies the signature, checks the webhook is enabled and
/// the branch matches, then runs `CloneRepo` as the git webhook user.
pub async fn handle_repo_clone_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = repo_locks().get_or_insert_default(&repo_id).await;
  let _lock = lock.lock().await;
  let repo = resource::get::<Repo>(&repo_id).await?;
  // Authenticate the request before acting on it.
  verify_gh_signature(headers, &body, &repo.config.webhook_secret)
    .await?;
  if !repo.config.webhook_enabled {
    return Err(anyhow!("repo does not have webhook enabled"));
  }
  // Only act on pushes to the configured branch.
  let request_branch = extract_branch(&body)?;
  if request_branch != repo.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req =
    crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
      repo: repo_id,
    });
  // Create the tracking update before dispatching the execution.
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
  else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles an incoming GitHub push webhook that triggers a repo
/// pull: verifies the signature, checks the webhook is enabled and
/// the branch matches, then runs `PullRepo` as the git webhook user.
pub async fn handle_repo_pull_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let queue_lock = repo_locks().get_or_insert_default(&repo_id).await;
  let _guard = queue_lock.lock().await;
  let repo = resource::get::<Repo>(&repo_id).await?;
  verify_gh_signature(headers, &body, &repo.config.webhook_secret)
    .await?;
  if !repo.config.webhook_enabled {
    return Err(anyhow!("repo does not have webhook enabled"));
  }
  // Only act on pushes to the configured branch.
  if extract_branch(&body)? != repo.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req = crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
    repo: repo_id,
  });
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::PullRepo(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
/// Handles an incoming GitHub push webhook that builds from a repo:
/// verifies the signature, checks the webhook is enabled and the
/// branch matches, then runs `BuildRepo` as the git webhook user.
pub async fn handle_repo_build_webhook(
  repo_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = repo_locks().get_or_insert_default(&repo_id).await;
  let _lock = lock.lock().await;
  let repo = resource::get::<Repo>(&repo_id).await?;
  // Authenticate the request before acting on it.
  verify_gh_signature(headers, &body, &repo.config.webhook_secret)
    .await?;
  if !repo.config.webhook_enabled {
    return Err(anyhow!("repo does not have webhook enabled"));
  }
  // Only act on pushes to the configured branch.
  let request_branch = extract_branch(&body)?;
  if request_branch != repo.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req =
    crate::api::execute::ExecuteRequest::BuildRepo(BuildRepo {
      repo: repo_id,
    });
  // Create the tracking update before dispatching the execution.
  let update = init_execution_update(&req, &user).await?;
  let crate::api::execute::ExecuteRequest::BuildRepo(req) = req
  else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -1,91 +0,0 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use komodo_client::{
api::{execute::DeployStack, write::RefreshStackCache},
entities::{stack::Stack, user::git_webhook_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
// Lazily-initialized map of one mutex per stack id.
fn stack_locks() -> &'static ListenerLockCache {
  static STACK_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  STACK_LOCKS.get_or_init(Default::default)
}

/// Handle an incoming stack "refresh" webhook: authenticate it,
/// then refresh the stack's cache as the git webhook user.
pub async fn handle_stack_refresh_webhook(
  stack_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through, from "action state busy".
  let lock = stack_locks().get_or_insert_default(&stack_id).await;
  let _lock = lock.lock().await;
  let stack = resource::get::<Stack>(&stack_id).await?;
  // Github-style HMAC signature check against the stack's webhook secret.
  verify_gh_signature(headers, &body, &stack.config.webhook_secret)
    .await?;
  if !stack.config.webhook_enabled {
    return Err(anyhow!("stack does not have webhook enabled"));
  }
  // Only react to pushes on the configured branch.
  let request_branch = extract_branch(&body)?;
  if request_branch != stack.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  State
    .resolve(RefreshStackCache { stack: stack.id }, user)
    .await?;
  Ok(())
}
/// Handle an incoming stack "deploy" webhook: authenticate it,
/// then trigger a DeployStack execution as the git webhook user.
pub async fn handle_stack_deploy_webhook(
  stack_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = stack_locks().get_or_insert_default(&stack_id).await;
  let _lock = lock.lock().await;
  let stack = resource::get::<Stack>(&stack_id).await?;
  // Github-style HMAC signature check against the stack's webhook secret.
  verify_gh_signature(headers, &body, &stack.config.webhook_secret)
    .await?;
  if !stack.config.webhook_enabled {
    return Err(anyhow!("stack does not have webhook enabled"));
  }
  // Only react to pushes on the configured branch.
  let request_branch = extract_branch(&body)?;
  if request_branch != stack.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req = ExecuteRequest::DeployStack(DeployStack {
    stack: stack_id,
    stop_time: None,
  });
  // Create the update first so the execution is tracked like any other.
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::DeployStack(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -1,88 +0,0 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use axum::http::HeaderMap;
use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{sync::ResourceSync, user::git_webhook_user},
};
use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, resource, state::State,
};
use super::{extract_branch, verify_gh_signature, ListenerLockCache};
// Lazily-initialized map of one mutex per resource sync id.
fn sync_locks() -> &'static ListenerLockCache {
  static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  SYNC_LOCKS.get_or_init(Default::default)
}

/// Handle an incoming sync "refresh" webhook: authenticate it,
/// then refresh the pending sync state as the git webhook user.
pub async fn handle_sync_refresh_webhook(
  sync_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = sync_locks().get_or_insert_default(&sync_id).await;
  let _lock = lock.lock().await;
  let sync = resource::get::<ResourceSync>(&sync_id).await?;
  // Github-style HMAC signature check against the sync's webhook secret.
  verify_gh_signature(headers, &body, &sync.config.webhook_secret)
    .await?;
  if !sync.config.webhook_enabled {
    return Err(anyhow!("sync does not have webhook enabled"));
  }
  // Only react to pushes on the configured branch.
  let request_branch = extract_branch(&body)?;
  if request_branch != sync.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  State
    .resolve(RefreshResourceSyncPending { sync: sync_id }, user)
    .await?;
  Ok(())
}
/// Handle an incoming sync "execute" webhook: authenticate it,
/// then trigger a RunSync execution as the git webhook user.
pub async fn handle_sync_execute_webhook(
  sync_id: String,
  headers: HeaderMap,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = sync_locks().get_or_insert_default(&sync_id).await;
  let _lock = lock.lock().await;
  let sync = resource::get::<ResourceSync>(&sync_id).await?;
  // Github-style HMAC signature check against the sync's webhook secret.
  verify_gh_signature(headers, &body, &sync.config.webhook_secret)
    .await?;
  if !sync.config.webhook_enabled {
    return Err(anyhow!("sync does not have webhook enabled"));
  }
  // Only react to pushes on the configured branch.
  let request_branch = extract_branch(&body)?;
  if request_branch != sync.config.branch {
    return Err(anyhow!("request branch does not match expected"));
  }
  let user = git_webhook_user().to_owned();
  let req = ExecuteRequest::RunSync(RunSync { sync: sync_id });
  // Create the update first so the execution is tracked like any other.
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunSync(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -0,0 +1,71 @@
use anyhow::{anyhow, Context};
use axum::http::HeaderMap;
use hex::ToHex;
use hmac::{Hmac, Mac};
use serde::Deserialize;
use sha2::Sha256;
use crate::{
config::core_config,
listener::{VerifyBranch, VerifySecret},
};
type HmacSha256 = Hmac<Sha256>;
/// Listener implementation for Github type API, including Gitea
pub struct Github;
impl VerifySecret for Github {
#[instrument("VerifyGithubSecret", skip_all)]
fn verify_secret(
headers: HeaderMap,
body: &str,
custom_secret: &str,
) -> anyhow::Result<()> {
let signature = headers
.get("x-hub-signature-256")
.context("No github signature in headers")?;
let signature = signature
.to_str()
.context("Failed to get signature as string")?;
let signature =
signature.strip_prefix("sha256=").unwrap_or(signature);
let secret_bytes = if custom_secret.is_empty() {
core_config().webhook_secret.as_bytes()
} else {
custom_secret.as_bytes()
};
let mut mac = HmacSha256::new_from_slice(secret_bytes)
.context("Failed to create hmac sha256 from secret")?;
mac.update(body.as_bytes());
let expected = mac.finalize().into_bytes().encode_hex::<String>();
if signature == expected {
Ok(())
} else {
Err(anyhow!("Signature does not equal expected"))
}
}
}
#[derive(Deserialize)]
struct GithubWebhookBody {
  /// Full git ref of the push, eg `refs/heads/main`.
  #[serde(rename = "ref")]
  branch: String,
}

impl VerifyBranch for Github {
  /// Returns Err if the branch extracted from the request
  /// body does not match the expected branch.
  fn verify_branch(
    body: &str,
    expected_branch: &str,
  ) -> anyhow::Result<()> {
    let branch = serde_json::from_str::<GithubWebhookBody>(body)
      .context("Failed to parse github request body")?
      .branch;
    // Strip only the leading `refs/heads/` prefix. `replace` would
    // also rewrite the substring anywhere else it occurs in an
    // unusual branch name, producing a wrong comparison.
    let branch =
      branch.strip_prefix("refs/heads/").unwrap_or(&branch);
    if branch == expected_branch {
      Ok(())
    } else {
      Err(anyhow!("request branch does not match expected"))
    }
  }
}

View File

@@ -0,0 +1,58 @@
use anyhow::{anyhow, Context};
use serde::Deserialize;
use crate::{
config::core_config,
listener::{VerifyBranch, VerifySecret},
};
/// Listener implementation for Gitlab type API
pub struct Gitlab;

impl VerifySecret for Gitlab {
  /// Verify the `x-gitlab-token` header against the expected webhook
  /// secret (resource-level `custom_secret` when non-empty, otherwise
  /// the core config webhook secret).
  ///
  /// The comparison is done in constant time (aside from the length
  /// check) so the secret cannot be recovered byte-by-byte via
  /// response timing, which a plain `==` on the strings would allow.
  #[instrument("VerifyGitlabSecret", skip_all)]
  fn verify_secret(
    headers: axum::http::HeaderMap,
    _body: &str,
    custom_secret: &str,
  ) -> anyhow::Result<()> {
    let token = headers
      .get("x-gitlab-token")
      .context("No gitlab token in headers")?;
    let token =
      token.to_str().context("Failed to get token as string")?;
    let secret = if custom_secret.is_empty() {
      core_config().webhook_secret.as_str()
    } else {
      custom_secret
    };
    // Constant-time equality: OR together the XOR of every byte pair,
    // so mismatch position does not affect runtime.
    let (token, secret) = (token.as_bytes(), secret.as_bytes());
    let equal = token.len() == secret.len()
      && token
        .iter()
        .zip(secret)
        .fold(0u8, |acc, (a, b)| acc | (a ^ b))
        == 0;
    if equal {
      Ok(())
    } else {
      Err(anyhow!("Webhook secret does not match expected."))
    }
  }
}
#[derive(Deserialize)]
struct GitlabWebhookBody {
  /// Full git ref of the push, eg `refs/heads/main`.
  #[serde(rename = "ref")]
  branch: String,
}

impl VerifyBranch for Gitlab {
  /// Returns Err if the branch extracted from the request
  /// body does not match the expected branch.
  fn verify_branch(
    body: &str,
    expected_branch: &str,
  ) -> anyhow::Result<()> {
    let branch = serde_json::from_str::<GitlabWebhookBody>(body)
      .context("Failed to parse gitlab request body")?
      .branch;
    // Strip only the leading `refs/heads/` prefix. `replace` would
    // also rewrite the substring anywhere else it occurs in an
    // unusual branch name, producing a wrong comparison.
    let branch =
      branch.strip_prefix("refs/heads/").unwrap_or(&branch);
    if branch == expected_branch {
      Ok(())
    } else {
      Err(anyhow!("request branch does not match expected"))
    }
  }
}

View File

@@ -0,0 +1,2 @@
pub mod github;
pub mod gitlab;

View File

@@ -1,7 +1,52 @@
use axum::Router;
use std::sync::Arc;
mod github;
use axum::{http::HeaderMap, Router};
use komodo_client::entities::resource::Resource;
use tokio::sync::Mutex;
use crate::{helpers::cache::Cache, resource::KomodoResource};
mod integrations;
mod resources;
mod router;
use integrations::*;
/// Top level webhook listener router.
/// Each provider integration gets its own nested route set.
pub fn router() -> Router {
  // Removed stale pre-refactor line `Router::new().nest("/github",
  // github::router())` — it built a router that was immediately
  // discarded, and called `github::router()`, which no longer exists
  // after the integrations refactor.
  Router::new()
    .nest("/github", router::router::<github::Github>())
    .nest("/gitlab", router::router::<gitlab::Gitlab>())
}
/// Per-resource-id lock map used to serialize webhook handling.
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;

/// Implemented for all resources which can receive webhooks.
trait CustomSecret: KomodoResource {
  /// The resource-level webhook secret. When non-empty it takes
  /// precedence over the core config secret during verification.
  fn custom_secret(
    resource: &Resource<Self::Config, Self::Info>,
  ) -> &str;
}

/// Implemented on the integration struct, eg [integrations::github::Github]
trait VerifySecret {
  /// Returns Err if the request cannot be authenticated against
  /// the given secret.
  fn verify_secret(
    headers: HeaderMap,
    body: &str,
    custom_secret: &str,
  ) -> anyhow::Result<()>;
}

/// Implemented on the integration struct, eg [integrations::github::Github]
trait VerifyBranch {
  /// Returns Err if the branch extracted from request
  /// body does not match the expected branch.
  fn verify_branch(
    body: &str,
    expected_branch: &str,
  ) -> anyhow::Result<()>;
}

/// For Procedures and Actions, incoming webhook
/// can be triggered by any branch by using `__ANY__`
/// as the branch in the webhook URL.
const ANY_BRANCH: &str = "__ANY__";

View File

@@ -0,0 +1,486 @@
use std::sync::OnceLock;
use anyhow::anyhow;
use komodo_client::{
api::{
execute::*,
write::{RefreshResourceSyncPending, RefreshStackCache},
},
entities::{
action::Action, build::Build, procedure::Procedure, repo::Repo,
stack::Stack, sync::ResourceSync, user::git_webhook_user,
},
};
use resolver_api::Resolve;
use serde::Deserialize;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update, state::State,
};
use super::{ListenerLockCache, ANY_BRANCH};
// =======
// BUILD
// =======
impl super::CustomSecret for Build {
fn custom_secret(resource: &Self) -> &str {
&resource.config.webhook_secret
}
}
fn build_locks() -> &'static ListenerLockCache {
static BUILD_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
BUILD_LOCKS.get_or_init(Default::default)
}
pub async fn handle_build_webhook<B: super::VerifyBranch>(
build: Build,
body: String,
) -> anyhow::Result<()> {
// Acquire and hold lock to make a task queue for
// subsequent listener calls on same resource.
// It would fail if we let it go through from action state busy.
let lock = build_locks().get_or_insert_default(&build.id).await;
let _lock = lock.lock().await;
if !build.config.webhook_enabled {
return Err(anyhow!("build does not have webhook enabled"));
}
B::verify_branch(&body, &build.config.branch)?;
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunBuild(RunBuild { build: build.id });
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunBuild(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
// ======
// REPO
// ======
impl super::CustomSecret for Repo {
fn custom_secret(resource: &Self) -> &str {
&resource.config.webhook_secret
}
}
fn repo_locks() -> &'static ListenerLockCache {
static REPO_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
REPO_LOCKS.get_or_init(Default::default)
}
pub trait RepoExecution {
async fn resolve(repo: Repo) -> anyhow::Result<()>;
}
impl RepoExecution for CloneRepo {
async fn resolve(repo: Repo) -> anyhow::Result<()> {
let user = git_webhook_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::CloneRepo(CloneRepo {
repo: repo.id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::CloneRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
}
impl RepoExecution for PullRepo {
async fn resolve(repo: Repo) -> anyhow::Result<()> {
let user = git_webhook_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::PullRepo(PullRepo {
repo: repo.id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::PullRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
}
impl RepoExecution for BuildRepo {
async fn resolve(repo: Repo) -> anyhow::Result<()> {
let user = git_webhook_user().to_owned();
let req =
crate::api::execute::ExecuteRequest::BuildRepo(BuildRepo {
repo: repo.id,
});
let update = init_execution_update(&req, &user).await?;
let crate::api::execute::ExecuteRequest::BuildRepo(req) = req
else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
}
/// Path parameters for the repo webhook route (`/repo/:id/:option`).
#[derive(Deserialize)]
pub struct RepoWebhookPath {
  pub option: RepoWebhookOption,
}

/// Which repo execution an incoming webhook should trigger.
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RepoWebhookOption {
  Clone,
  Pull,
  Build,
}

/// Dispatch an authenticated repo webhook to the requested execution.
pub async fn handle_repo_webhook<B: super::VerifyBranch>(
  option: RepoWebhookOption,
  repo: Repo,
  body: String,
) -> anyhow::Result<()> {
  match option {
    RepoWebhookOption::Clone => {
      handle_repo_webhook_inner::<B, CloneRepo>(repo, body).await
    }
    RepoWebhookOption::Pull => {
      handle_repo_webhook_inner::<B, PullRepo>(repo, body).await
    }
    RepoWebhookOption::Build => {
      handle_repo_webhook_inner::<B, BuildRepo>(repo, body).await
    }
  }
}

/// Shared repo webhook flow: lock, check enabled, verify branch,
/// then run the execution `E`.
async fn handle_repo_webhook_inner<
  B: super::VerifyBranch,
  E: RepoExecution,
>(
  repo: Repo,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = repo_locks().get_or_insert_default(&repo.id).await;
  let _lock = lock.lock().await;
  if !repo.config.webhook_enabled {
    return Err(anyhow!("repo does not have webhook enabled"));
  }
  B::verify_branch(&body, &repo.config.branch)?;
  E::resolve(repo).await
}
// =======
// STACK
// =======
impl super::CustomSecret for Stack {
fn custom_secret(resource: &Self) -> &str {
&resource.config.webhook_secret
}
}
fn stack_locks() -> &'static ListenerLockCache {
static STACK_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
STACK_LOCKS.get_or_init(Default::default)
}
pub trait StackExecution {
async fn resolve(stack: Stack) -> anyhow::Result<()>;
}
impl StackExecution for RefreshStackCache {
async fn resolve(stack: Stack) -> anyhow::Result<()> {
let user = git_webhook_user().to_owned();
State
.resolve(RefreshStackCache { stack: stack.id }, user)
.await?;
Ok(())
}
}
impl StackExecution for DeployStack {
  /// Deploy the stack as the git webhook user.
  ///
  /// When `webhook_force_deploy` is set, always runs DeployStack;
  /// otherwise runs DeployStackIfChanged, which skips the deploy when
  /// the stack is unchanged (semantics live in the execute resolver).
  async fn resolve(stack: Stack) -> anyhow::Result<()> {
    let user = git_webhook_user().to_owned();
    if stack.config.webhook_force_deploy {
      let req = ExecuteRequest::DeployStack(DeployStack {
        stack: stack.id,
        stop_time: None,
      });
      let update = init_execution_update(&req, &user).await?;
      let ExecuteRequest::DeployStack(req) = req else {
        unreachable!()
      };
      State.resolve(req, (user, update)).await?;
    } else {
      let req =
        ExecuteRequest::DeployStackIfChanged(DeployStackIfChanged {
          stack: stack.id,
          stop_time: None,
        });
      let update = init_execution_update(&req, &user).await?;
      let ExecuteRequest::DeployStackIfChanged(req) = req else {
        unreachable!()
      };
      State.resolve(req, (user, update)).await?;
    }
    Ok(())
  }
}
/// Path parameters for the stack webhook route (`/stack/:id/:option`).
#[derive(Deserialize)]
pub struct StackWebhookPath {
  pub option: StackWebhookOption,
}

/// Which stack execution an incoming webhook should trigger.
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum StackWebhookOption {
  Refresh,
  Deploy,
}

/// Dispatch an authenticated stack webhook to the requested execution.
pub async fn handle_stack_webhook<B: super::VerifyBranch>(
  option: StackWebhookOption,
  stack: Stack,
  body: String,
) -> anyhow::Result<()> {
  match option {
    StackWebhookOption::Refresh => {
      handle_stack_webhook_inner::<B, RefreshStackCache>(stack, body)
        .await
    }
    StackWebhookOption::Deploy => {
      handle_stack_webhook_inner::<B, DeployStack>(stack, body).await
    }
  }
}

/// Shared stack webhook flow: lock, check enabled, verify branch,
/// then run the execution `E`.
pub async fn handle_stack_webhook_inner<
  B: super::VerifyBranch,
  E: StackExecution,
>(
  stack: Stack,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through, from "action state busy".
  let lock = stack_locks().get_or_insert_default(&stack.id).await;
  let _lock = lock.lock().await;
  if !stack.config.webhook_enabled {
    return Err(anyhow!("stack does not have webhook enabled"));
  }
  B::verify_branch(&body, &stack.config.branch)?;
  E::resolve(stack).await
}
// ======
// SYNC
// ======
impl super::CustomSecret for ResourceSync {
fn custom_secret(resource: &Self) -> &str {
&resource.config.webhook_secret
}
}
fn sync_locks() -> &'static ListenerLockCache {
static SYNC_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
SYNC_LOCKS.get_or_init(Default::default)
}
pub trait SyncExecution {
async fn resolve(sync: ResourceSync) -> anyhow::Result<()>;
}
impl SyncExecution for RefreshResourceSyncPending {
async fn resolve(sync: ResourceSync) -> anyhow::Result<()> {
let user = git_webhook_user().to_owned();
State
.resolve(RefreshResourceSyncPending { sync: sync.id }, user)
.await?;
Ok(())
}
}
impl SyncExecution for RunSync {
async fn resolve(sync: ResourceSync) -> anyhow::Result<()> {
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunSync(RunSync {
sync: sync.id,
resource_type: None,
resources: None,
});
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RunSync(req) = req else {
unreachable!()
};
State.resolve(req, (user, update)).await?;
Ok(())
}
}
/// Path parameters for the sync webhook route (`/sync/:id/:option`).
#[derive(Deserialize)]
pub struct SyncWebhookPath {
  pub option: SyncWebhookOption,
}

/// Which sync execution an incoming webhook should trigger.
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SyncWebhookOption {
  Refresh,
  Sync,
}

/// Dispatch an authenticated sync webhook to the requested execution.
pub async fn handle_sync_webhook<B: super::VerifyBranch>(
  option: SyncWebhookOption,
  sync: ResourceSync,
  body: String,
) -> anyhow::Result<()> {
  match option {
    SyncWebhookOption::Refresh => {
      handle_sync_webhook_inner::<B, RefreshResourceSyncPending>(
        sync, body,
      )
      .await
    }
    SyncWebhookOption::Sync => {
      handle_sync_webhook_inner::<B, RunSync>(sync, body).await
    }
  }
}

/// Shared sync webhook flow: lock, check enabled, verify branch,
/// then run the execution `E`.
async fn handle_sync_webhook_inner<
  B: super::VerifyBranch,
  E: SyncExecution,
>(
  sync: ResourceSync,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = sync_locks().get_or_insert_default(&sync.id).await;
  let _lock = lock.lock().await;
  if !sync.config.webhook_enabled {
    return Err(anyhow!("sync does not have webhook enabled"));
  }
  B::verify_branch(&body, &sync.config.branch)?;
  E::resolve(sync).await
}
// ===========
// PROCEDURE
// ===========
// ===========
// PROCEDURE
// ===========

impl super::CustomSecret for Procedure {
  // Resource-level secret, overrides core config secret when set.
  fn custom_secret(resource: &Self) -> &str {
    &resource.config.webhook_secret
  }
}

// Lazily-initialized map of one mutex per procedure id.
fn procedure_locks() -> &'static ListenerLockCache {
  static PROCEDURE_LOCKS: OnceLock<ListenerLockCache> =
    OnceLock::new();
  PROCEDURE_LOCKS.get_or_init(Default::default)
}

/// Handle an authenticated procedure webhook by triggering
/// RunProcedure as the git webhook user.
///
/// `target_branch` comes from the webhook URL; the special value
/// [ANY_BRANCH] (`__ANY__`) skips branch verification entirely.
pub async fn handle_procedure_webhook<B: super::VerifyBranch>(
  procedure: Procedure,
  target_branch: String,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock =
    procedure_locks().get_or_insert_default(&procedure.id).await;
  let _lock = lock.lock().await;
  if !procedure.config.webhook_enabled {
    return Err(anyhow!("procedure does not have webhook enabled"));
  }
  if target_branch != ANY_BRANCH {
    B::verify_branch(&body, &target_branch)?;
  }
  let user = git_webhook_user().to_owned();
  let req = ExecuteRequest::RunProcedure(RunProcedure {
    procedure: procedure.id,
  });
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunProcedure(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}
// ========
// ACTION
// ========
// ========
// ACTION
// ========

impl super::CustomSecret for Action {
  // Resource-level secret, overrides core config secret when set.
  fn custom_secret(resource: &Self) -> &str {
    &resource.config.webhook_secret
  }
}

// Lazily-initialized map of one mutex per action id.
fn action_locks() -> &'static ListenerLockCache {
  static ACTION_LOCKS: OnceLock<ListenerLockCache> = OnceLock::new();
  ACTION_LOCKS.get_or_init(Default::default)
}

/// Handle an authenticated action webhook by triggering RunAction
/// as the git webhook user.
///
/// `target_branch` comes from the webhook URL; the special value
/// [ANY_BRANCH] (`__ANY__`) skips branch verification entirely.
pub async fn handle_action_webhook<B: super::VerifyBranch>(
  action: Action,
  target_branch: String,
  body: String,
) -> anyhow::Result<()> {
  // Acquire and hold lock to make a task queue for
  // subsequent listener calls on same resource.
  // It would fail if we let it go through from action state busy.
  let lock = action_locks().get_or_insert_default(&action.id).await;
  let _lock = lock.lock().await;
  if !action.config.webhook_enabled {
    return Err(anyhow!("action does not have webhook enabled"));
  }
  if target_branch != ANY_BRANCH {
    B::verify_branch(&body, &target_branch)?;
  }
  let user = git_webhook_user().to_owned();
  let req =
    ExecuteRequest::RunAction(RunAction { action: action.id });
  let update = init_execution_update(&req, &user).await?;
  let ExecuteRequest::RunAction(req) = req else {
    unreachable!()
  };
  State.resolve(req, (user, update)).await?;
  Ok(())
}

View File

@@ -0,0 +1,208 @@
use axum::{extract::Path, http::HeaderMap, routing::post, Router};
use komodo_client::entities::{
action::Action, build::Build, procedure::Procedure, repo::Repo,
resource::Resource, stack::Stack, sync::ResourceSync,
};
use serde::Deserialize;
use tracing::Instrument;
use crate::resource::KomodoResource;
use super::{
resources::{
handle_action_webhook, handle_build_webhook,
handle_procedure_webhook, handle_repo_webhook,
handle_stack_webhook, handle_sync_webhook, RepoWebhookPath,
StackWebhookPath, SyncWebhookPath,
},
CustomSecret, VerifyBranch, VerifySecret,
};
/// Path params: the target resource id segment (`:id`).
#[derive(Deserialize)]
struct Id {
  id: String,
}

/// Path params: the branch segment (`:branch`) on procedure /
/// action routes.
#[derive(Deserialize)]
struct Branch {
  // Falls back to "main" if the segment is missing from the params.
  #[serde(default = "default_branch")]
  branch: String,
}
/// Fallback branch used when the webhook URL does not specify one.
fn default_branch() -> String {
  "main".to_string()
}
/// Build the webhook routes for a single provider integration `P`.
///
/// Every route first authenticates synchronously via [auth_webhook]
/// (secret check), then spawns a detached task to run the handler so
/// the provider gets a fast response; handler failures are only
/// logged, never returned to the caller.
///
/// NOTE(review): routes with two path segments extract `Path` twice
/// (eg `Path(Id { id })` and `Path(RepoWebhookPath { option })`);
/// this assumes axum deserializes each struct from the full param
/// map, ignoring unknown fields — confirm against axum's `Path` docs.
pub fn router<P: VerifySecret + VerifyBranch>() -> Router {
  Router::new()
    // Trigger a build: /build/:id
    .route(
      "/build/:id",
      post(
        |Path(Id { id }), headers: HeaderMap, body: String| async move {
          let build =
            auth_webhook::<P, Build>(&id, headers, &body).await?;
          tokio::spawn(async move {
            let span = info_span!("BuildWebhook", id);
            async {
              let res = handle_build_webhook::<P>(
                build, body,
              )
              .await;
              if let Err(e) = res {
                warn!(
                  "Failed at running webhook for build {id} | {e:#}"
                );
              }
            }
            .instrument(span)
            .await
          });
          serror::Result::Ok(())
        },
      ),
    )
    // Clone / pull / build a repo: /repo/:id/:option
    .route(
      "/repo/:id/:option",
      post(
        |Path(Id { id }), Path(RepoWebhookPath { option }), headers: HeaderMap, body: String| async move {
          let repo =
            auth_webhook::<P, Repo>(&id, headers, &body).await?;
          tokio::spawn(async move {
            let span = info_span!("RepoWebhook", id);
            async {
              let res = handle_repo_webhook::<P>(
                option, repo, body,
              )
              .await;
              if let Err(e) = res {
                warn!(
                  "Failed at running webhook for repo {id} | {e:#}"
                );
              }
            }
            .instrument(span)
            .await
          });
          serror::Result::Ok(())
        },
      ),
    )
    // Refresh / deploy a stack: /stack/:id/:option
    .route(
      "/stack/:id/:option",
      post(
        |Path(Id { id }), Path(StackWebhookPath { option }), headers: HeaderMap, body: String| async move {
          let stack =
            auth_webhook::<P, Stack>(&id, headers, &body).await?;
          tokio::spawn(async move {
            let span = info_span!("StackWebhook", id);
            async {
              let res = handle_stack_webhook::<P>(
                option, stack, body,
              )
              .await;
              if let Err(e) = res {
                warn!(
                  "Failed at running webhook for stack {id} | {e:#}"
                );
              }
            }
            .instrument(span)
            .await
          });
          serror::Result::Ok(())
        },
      ),
    )
    // Refresh / execute a resource sync: /sync/:id/:option
    .route(
      "/sync/:id/:option",
      post(
        |Path(Id { id }), Path(SyncWebhookPath { option }), headers: HeaderMap, body: String| async move {
          let sync =
            auth_webhook::<P, ResourceSync>(&id, headers, &body).await?;
          tokio::spawn(async move {
            let span = info_span!("ResourceSyncWebhook", id);
            async {
              let res = handle_sync_webhook::<P>(
                option, sync, body,
              )
              .await;
              if let Err(e) = res {
                warn!(
                  "Failed at running webhook for resource sync {id} | {e:#}"
                );
              }
            }
            .instrument(span)
            .await
          });
          serror::Result::Ok(())
        },
      ),
    )
    // Run a procedure: /procedure/:id/:branch (branch may be __ANY__)
    .route(
      "/procedure/:id/:branch",
      post(
        |Path(Id { id }), Path(Branch { branch }), headers: HeaderMap, body: String| async move {
          let procedure =
            auth_webhook::<P, Procedure>(&id, headers, &body).await?;
          tokio::spawn(async move {
            let span = info_span!("ProcedureWebhook", id);
            async {
              let res = handle_procedure_webhook::<P>(
                procedure, branch, body,
              )
              .await;
              if let Err(e) = res {
                warn!(
                  "Failed at running webhook for procedure {id} | {e:#}"
                );
              }
            }
            .instrument(span)
            .await
          });
          serror::Result::Ok(())
        },
      ),
    )
    // Run an action: /action/:id/:branch (branch may be __ANY__)
    .route(
      "/action/:id/:branch",
      post(
        |Path(Id { id }), Path(Branch { branch }), headers: HeaderMap, body: String| async move {
          let action =
            auth_webhook::<P, Action>(&id, headers, &body).await?;
          tokio::spawn(async move {
            let span = info_span!("ActionWebhook", id);
            async {
              let res = handle_action_webhook::<P>(
                action, branch, body,
              )
              .await;
              if let Err(e) = res {
                warn!(
                  "Failed at running webhook for action {id} | {e:#}"
                );
              }
            }
            .instrument(span)
            .await
          });
          serror::Result::Ok(())
        },
      ),
    )
}
/// Shared auth step for all webhook routes: load the resource by id
/// and verify the request secret using integration `P`.
///
/// Passes the resource's own webhook secret ([CustomSecret]) to the
/// integration; the integration falls back to the core config secret
/// when it is empty.
async fn auth_webhook<P, R>(
  id: &str,
  headers: HeaderMap,
  body: &str,
) -> serror::Result<Resource<R::Config, R::Info>>
where
  P: VerifySecret,
  R: KomodoResource + CustomSecret,
{
  let resource = crate::resource::get::<R>(id).await?;
  P::verify_secret(headers, body, R::custom_secret(&resource))?;
  Ok(resource)
}

View File

@@ -26,6 +26,7 @@ mod resource;
mod stack;
mod state;
mod sync;
mod ts_client;
mod ws;
async fn app() -> anyhow::Result<()> {
@@ -57,6 +58,7 @@ async fn app() -> anyhow::Result<()> {
resource::spawn_build_state_refresh_loop();
resource::spawn_repo_state_refresh_loop();
resource::spawn_procedure_state_refresh_loop();
resource::spawn_action_state_refresh_loop();
resource::spawn_resource_sync_state_refresh_loop();
helpers::prune::spawn_prune_loop();
@@ -75,6 +77,7 @@ async fn app() -> anyhow::Result<()> {
.nest("/execute", api::execute::router())
.nest("/listener", listener::router())
.nest("/ws", ws::router())
.nest("/client", ts_client::router())
.nest_service("/", serve_dir)
.fallback_service(frontend_index)
.layer(cors()?)

View File

@@ -2,9 +2,7 @@ use std::collections::HashMap;
use anyhow::Context;
use komodo_client::entities::{
resource::ResourceQuery,
server::{Server, ServerListItem},
user::User,
resource::ResourceQuery, server::Server, user::User,
};
use crate::resource;
@@ -32,16 +30,16 @@ pub async fn check_alerts(ts: i64) {
}
#[instrument(level = "debug")]
async fn get_all_servers_map() -> anyhow::Result<(
HashMap<String, ServerListItem>,
HashMap<String, String>,
)> {
let servers = resource::list_for_user::<Server>(
async fn get_all_servers_map(
) -> anyhow::Result<(HashMap<String, Server>, HashMap<String, String>)>
{
let servers = resource::list_full_for_user::<Server>(
ResourceQuery::default(),
&User {
admin: true,
..Default::default()
},
&[],
)
.await
.context("failed to get servers from db (in alert_servers)")?;

View File

@@ -5,7 +5,7 @@ use derive_variants::ExtractVariant;
use komodo_client::entities::{
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
komodo_timestamp, optional_string,
server::{ServerListItem, ServerState},
server::{Server, ServerState},
ResourceTarget,
};
use mongo_indexed::Indexed;
@@ -28,7 +28,7 @@ type OpenDiskAlertMap = OpenAlertMap<PathBuf>;
#[instrument(level = "debug")]
pub async fn alert_servers(
ts: i64,
mut servers: HashMap<String, ServerListItem>,
mut servers: HashMap<String, Server>,
) {
let server_statuses = server_status_cache().get_list().await;
@@ -70,12 +70,12 @@ pub async fn alert_servers(
data: AlertData::ServerUnreachable {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
err: server_status.err.clone(),
},
};
alerts_to_open
.push((alert, server.info.send_unreachable_alerts))
.push((alert, server.config.send_unreachable_alerts))
}
(ServerState::NotOk, Some(alert)) => {
// update alert err
@@ -102,8 +102,10 @@ pub async fn alert_servers(
// Close an open alert
(ServerState::Ok | ServerState::Disabled, Some(alert)) => {
alert_ids_to_close
.push((alert.clone(), server.info.send_unreachable_alerts));
alert_ids_to_close.push((
alert.clone(),
server.config.send_unreachable_alerts,
));
}
_ => {}
}
@@ -119,20 +121,21 @@ pub async fn alert_servers(
.as_ref()
.and_then(|alerts| alerts.get(&AlertDataVariant::ServerCpu))
.cloned();
match (health.cpu, cpu_alert) {
(SeverityLevel::Warning | SeverityLevel::Critical, None) => {
match (health.cpu.level, cpu_alert, health.cpu.should_close_alert)
{
(SeverityLevel::Warning | SeverityLevel::Critical, None, _) => {
// open alert
let alert = Alert {
id: Default::default(),
ts,
resolved: false,
resolved_ts: None,
level: health.cpu,
level: health.cpu.level,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.as_ref()
@@ -140,41 +143,44 @@ pub async fn alert_servers(
.unwrap_or(0.0),
},
};
alerts_to_open.push((alert, server.info.send_cpu_alerts));
alerts_to_open.push((alert, server.config.send_cpu_alerts));
}
(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if alert.level < health.cpu {
alert.level = health.cpu;
if alert.level < health.cpu.level {
alert.level = health.cpu.level;
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
};
alerts_to_update.push((alert, server.info.send_cpu_alerts));
alerts_to_update
.push((alert, server.config.send_cpu_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => {
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerCpu {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
percentage: server_status
.stats
.as_ref()
.map(|s| s.cpu_perc as f64)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_cpu_alerts))
alert_ids_to_close
.push((alert, server.config.send_cpu_alerts))
}
_ => {}
}
@@ -186,20 +192,21 @@ pub async fn alert_servers(
.as_ref()
.and_then(|alerts| alerts.get(&AlertDataVariant::ServerMem))
.cloned();
match (health.mem, mem_alert) {
(SeverityLevel::Warning | SeverityLevel::Critical, None) => {
match (health.mem.level, mem_alert, health.mem.should_close_alert)
{
(SeverityLevel::Warning | SeverityLevel::Critical, None, _) => {
// open alert
let alert = Alert {
id: Default::default(),
ts,
resolved: false,
resolved_ts: None,
level: health.mem,
level: health.mem.level,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.as_ref()
@@ -212,19 +219,20 @@ pub async fn alert_servers(
.unwrap_or(0.0),
},
};
alerts_to_open.push((alert, server.info.send_mem_alerts));
alerts_to_open.push((alert, server.config.send_mem_alerts));
}
(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
_,
) => {
// modify alert level only if it has increased
if alert.level < health.mem {
alert.level = health.mem;
if alert.level < health.mem.level {
alert.level = health.mem.level;
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.as_ref()
@@ -236,15 +244,16 @@ pub async fn alert_servers(
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
};
alerts_to_update.push((alert, server.info.send_mem_alerts));
alerts_to_update
.push((alert, server.config.send_mem_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => {
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
alert.data = AlertData::ServerMem {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
total_gb: server_status
.stats
.as_ref()
@@ -256,7 +265,8 @@ pub async fn alert_servers(
.map(|s| s.mem_used_gb)
.unwrap_or(0.0),
};
alert_ids_to_close.push((alert, server.info.send_mem_alerts))
alert_ids_to_close
.push((alert, server.config.send_mem_alerts))
}
_ => {}
}
@@ -273,8 +283,12 @@ pub async fn alert_servers(
.as_ref()
.and_then(|alerts| alerts.get(path))
.cloned();
match (*health, disk_alert) {
(SeverityLevel::Warning | SeverityLevel::Critical, None) => {
match (health.level, disk_alert, health.should_close_alert) {
(
SeverityLevel::Warning | SeverityLevel::Critical,
None,
_,
) => {
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
@@ -283,58 +297,60 @@ pub async fn alert_servers(
ts,
resolved: false,
resolved_ts: None,
level: *health,
level: health.level,
target: ResourceTarget::Server(server_status.id.clone()),
data: AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
},
};
alerts_to_open.push((alert, server.info.send_disk_alerts));
alerts_to_open
.push((alert, server.config.send_disk_alerts));
}
(
SeverityLevel::Warning | SeverityLevel::Critical,
Some(mut alert),
_,
) => {
// Disk is persistent, update alert if health changes regardless of direction
if *health != alert.level {
// modify alert level only if it has increased
if health.level < alert.level {
let disk =
server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = *health;
alert.level = health.level;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
};
alerts_to_update
.push((alert, server.info.send_disk_alerts));
.push((alert, server.config.send_disk_alerts));
}
}
(SeverityLevel::Ok, Some(alert)) => {
(SeverityLevel::Ok, Some(alert), true) => {
let mut alert = alert.clone();
let disk = server_status.stats.as_ref().and_then(|stats| {
stats.disks.iter().find(|disk| disk.mount == *path)
});
alert.level = *health;
alert.level = health.level;
alert.data = AlertData::ServerDisk {
id: server_status.id.clone(),
name: server.name.clone(),
region: optional_string(&server.info.region),
region: optional_string(&server.config.region),
path: path.to_owned(),
total_gb: disk.map(|d| d.total_gb).unwrap_or_default(),
used_gb: disk.map(|d| d.used_gb).unwrap_or_default(),
};
alert_ids_to_close
.push((alert, server.info.send_disk_alerts))
.push((alert, server.config.send_disk_alerts))
}
_ => {}
}
@@ -347,7 +363,7 @@ pub async fn alert_servers(
let mut alert = alert.clone();
alert.level = SeverityLevel::Ok;
alert_ids_to_close
.push((alert, server.info.send_disk_alerts));
.push((alert, server.config.send_disk_alerts));
}
}
}

View File

@@ -6,7 +6,10 @@ use komodo_client::entities::{
network::NetworkListItem, volume::VolumeListItem,
},
repo::Repo,
server::{Server, ServerConfig, ServerHealth, ServerState},
server::{
Server, ServerConfig, ServerHealth, ServerHealthState,
ServerState,
},
stack::{ComposeProject, Stack, StackState},
stats::{SingleDiskUsage, SystemStats},
};
@@ -126,6 +129,8 @@ pub async fn insert_server_status(
.await;
}
const ALERT_PERCENTAGE_THRESHOLD: f32 = 5.0;
fn get_server_health(
server: &Server,
SystemStats {
@@ -148,16 +153,22 @@ fn get_server_health(
let mut health = ServerHealth::default();
if cpu_perc >= cpu_critical {
health.cpu = SeverityLevel::Critical
health.cpu.level = SeverityLevel::Critical;
} else if cpu_perc >= cpu_warning {
health.cpu = SeverityLevel::Warning
health.cpu.level = SeverityLevel::Warning
} else if *cpu_perc < cpu_warning - ALERT_PERCENTAGE_THRESHOLD {
health.cpu.should_close_alert = true
}
let mem_perc = 100.0 * mem_used_gb / mem_total_gb;
if mem_perc >= *mem_critical {
health.mem = SeverityLevel::Critical
health.mem.level = SeverityLevel::Critical
} else if mem_perc >= *mem_warning {
health.mem = SeverityLevel::Warning
health.mem.level = SeverityLevel::Warning
} else if mem_perc
< mem_warning - (ALERT_PERCENTAGE_THRESHOLD as f64)
{
health.mem.should_close_alert = true
}
for SingleDiskUsage {
@@ -168,14 +179,17 @@ fn get_server_health(
} in disks
{
let perc = 100.0 * used_gb / total_gb;
let stats_state = if perc >= *disk_critical {
SeverityLevel::Critical
let mut state = ServerHealthState::default();
if perc >= *disk_critical {
state.level = SeverityLevel::Critical;
} else if perc >= *disk_warning {
SeverityLevel::Warning
} else {
SeverityLevel::Ok
state.level = SeverityLevel::Warning;
} else if perc
< disk_warning - (ALERT_PERCENTAGE_THRESHOLD as f64)
{
state.should_close_alert = true;
};
health.disks.insert(mount.clone(), stats_state);
health.disks.insert(mount.clone(), state);
}
health

View File

@@ -206,7 +206,10 @@ pub async fn update_cache_for_server(server: &Server) {
};
match lists::get_docker_lists(&periphery).await {
Ok((containers, networks, images, volumes, projects)) => {
Ok((mut containers, networks, images, volumes, projects)) => {
containers.iter_mut().for_each(|container| {
container.server_id = Some(server.id.clone())
});
tokio::join!(
resources::update_deployment_cache(deployments, &containers),
resources::update_stack_cache(stacks, &containers),

View File

@@ -0,0 +1,219 @@
use std::time::Duration;
use anyhow::Context;
use komodo_client::entities::{
action::{
Action, ActionConfig, ActionConfigDiff, ActionInfo,
ActionListItem, ActionListItemInfo, ActionQuerySpecifics,
ActionState, PartialActionConfig,
},
resource::Resource,
update::Update,
user::User,
Operation, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOneOptions, Collection},
};
use crate::state::{action_state_cache, action_states, db_client};
impl super::KomodoResource for Action {
  type Config = ActionConfig;
  type PartialConfig = PartialActionConfig;
  type ConfigDiff = ActionConfigDiff;
  type Info = ActionInfo;
  type ListItem = ActionListItem;
  type QuerySpecifics = ActionQuerySpecifics;

  fn resource_type() -> ResourceTargetVariant {
    ResourceTargetVariant::Action
  }

  fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
  {
    &db_client().actions
  }

  /// Build the list-view representation, resolving the Action's
  /// current run state first.
  async fn to_list_item(
    resource: Resource<Self::Config, Self::Info>,
  ) -> Self::ListItem {
    // Resolve state while `resource.id` can still be borrowed,
    // before fields are moved into the list item.
    let state = get_action_state(&resource.id).await;
    ActionListItem {
      info: ActionListItemInfo {
        state,
        last_run_at: resource.info.last_run_at,
      },
      name: resource.name,
      id: resource.id,
      tags: resource.tags,
      resource_type: ResourceTargetVariant::Action,
    }
  }

  /// Whether the Action currently has an execution in progress.
  async fn busy(id: &String) -> anyhow::Result<bool> {
    let states =
      action_states().action.get(id).await.unwrap_or_default();
    states.busy()
  }

  // CREATE

  fn create_operation() -> Operation {
    Operation::CreateAction
  }

  fn user_can_create(user: &User) -> bool {
    user.admin
  }

  async fn validate_create_config(
    config: &mut Self::PartialConfig,
    _user: &User,
  ) -> anyhow::Result<()> {
    // Seed newly created Actions with the default script when the
    // caller did not provide file contents.
    config
      .file_contents
      .get_or_insert_with(|| DEFAULT_ACTION_FILE_CONTENTS.to_string());
    Ok(())
  }

  async fn post_create(
    _created: &Resource<Self::Config, Self::Info>,
    _update: &mut Update,
  ) -> anyhow::Result<()> {
    // Make the new Action visible in the state cache right away.
    refresh_action_state_cache().await;
    Ok(())
  }

  // UPDATE

  fn update_operation() -> Operation {
    Operation::UpdateAction
  }

  async fn validate_update_config(
    _id: &str,
    _config: &mut Self::PartialConfig,
    _user: &User,
  ) -> anyhow::Result<()> {
    Ok(())
  }

  async fn post_update(
    updated: &Self,
    update: &mut Update,
  ) -> anyhow::Result<()> {
    // Same post-processing as create: refresh the state cache.
    Self::post_create(updated, update).await
  }

  // RENAME

  fn rename_operation() -> Operation {
    Operation::RenameAction
  }

  // DELETE

  fn delete_operation() -> Operation {
    Operation::DeleteAction
  }

  async fn pre_delete(
    _resource: &Resource<Self::Config, Self::Info>,
    _update: &mut Update,
  ) -> anyhow::Result<()> {
    Ok(())
  }

  async fn post_delete(
    _resource: &Resource<Self::Config, Self::Info>,
    _update: &mut Update,
  ) -> anyhow::Result<()> {
    Ok(())
  }
}
/// Spawn a detached background task which refreshes the Action
/// state cache from the db once per minute.
pub fn spawn_action_state_refresh_loop() {
  tokio::spawn(async move {
    let period = Duration::from_secs(60);
    loop {
      refresh_action_state_cache().await;
      tokio::time::sleep(period).await;
    }
  });
}
pub async fn refresh_action_state_cache() {
let _ = async {
let actions = find_collect(&db_client().actions, None, None)
.await
.context("Failed to get Actions from db")?;
let cache = action_state_cache();
for action in actions {
let state = get_action_state_from_db(&action.id).await;
cache.insert(action.id, state).await;
}
anyhow::Ok(())
}
.await
.inspect_err(|e| {
error!("Failed to refresh Action state cache | {e:#}")
});
}
/// Resolve the current state of an Action: `Running` when an
/// execution is in flight, otherwise the cached db-derived state.
async fn get_action_state(id: &String) -> ActionState {
  // Check the in-memory action states for an in-flight run.
  // Any error reading the state is treated as "not running".
  let running = match action_states().action.get(id).await {
    Some(state) => state.get().map(|s| s.running).unwrap_or(false),
    None => false,
  };
  if running {
    return ActionState::Running;
  }
  // Fall back to the periodically refreshed state cache.
  action_state_cache().get(id).await.unwrap_or_default()
}
/// Derive an Action's state from the most recent `RunAction` update
/// recorded in the db: `Ok` on success (or if never run), `Failed`
/// on failure, `Unknown` if the query itself fails.
async fn get_action_state_from_db(id: &str) -> ActionState {
  let res = async {
    // Fetch only the latest run, sorted by start time descending.
    let latest = db_client()
      .updates
      .find_one(doc! {
        "target.type": "Action",
        "target.id": id,
        "operation": "RunAction"
      })
      .with_options(
        FindOneOptions::builder()
          .sort(doc! { "start_ts": -1 })
          .build(),
      )
      .await?;
    let state = match latest {
      Some(update) if update.success => ActionState::Ok,
      Some(_) => ActionState::Failed,
      // Never run before -> treated as Ok.
      None => ActionState::Ok,
    };
    anyhow::Ok(state)
  }
  .await;
  match res {
    Ok(state) => state,
    Err(e) => {
      warn!("Failed to get Action state for {id} | {e:#}");
      ActionState::Unknown
    }
  }
}
/// Default script contents for a newly created Action.
/// NOTE(review): the payload is a TypeScript snippet referencing a
/// pre-initialized `komodo` client — presumably executed by a
/// Deno-style runtime elsewhere; confirm against the Action runner.
const DEFAULT_ACTION_FILE_CONTENTS: &str =
  "// Run actions using the pre initialized 'komodo' client.
const version: Types.GetVersionResponse = await komodo.read('GetVersion', {});
console.log('🦎 Komodo version:', version.version, '🦎\\n');";

View File

@@ -25,8 +25,8 @@ impl super::KomodoResource for Alerter {
ResourceTargetVariant::Alerter
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().alerters
}
@@ -94,6 +94,12 @@ impl super::KomodoResource for Alerter {
Ok(())
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameAlerter
}
// DELETE
fn delete_operation() -> Operation {

View File

@@ -38,8 +38,8 @@ impl super::KomodoResource for Build {
ResourceTargetVariant::Build
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().builds
}
@@ -118,11 +118,16 @@ impl super::KomodoResource for Build {
}
async fn post_update(
_updated: &Self,
_update: &mut Update,
updated: &Self,
update: &mut Update,
) -> anyhow::Result<()> {
refresh_build_state_cache().await;
Ok(())
Self::post_create(updated, update).await
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameBuild
}
// DELETE
@@ -180,9 +185,13 @@ async fn validate_config(
) -> anyhow::Result<()> {
if let Some(builder_id) = &config.builder_id {
if !builder_id.is_empty() {
let builder = super::get_check_permissions::<Builder>(builder_id, user, PermissionLevel::Read)
.await
.context("cannot create build using this builder. user must have at least read permissions on the builder.")?;
let builder = super::get_check_permissions::<Builder>(
builder_id,
user,
PermissionLevel::Read,
)
.await
.context("Cannot attach Build to this Builder")?;
config.builder_id = Some(builder.id)
}
}

View File

@@ -31,8 +31,8 @@ impl super::KomodoResource for Builder {
ResourceTargetVariant::Builder
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().builders
}
@@ -118,6 +118,12 @@ impl super::KomodoResource for Builder {
Ok(())
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameBuilder
}
// DELETE
fn delete_operation() -> Operation {
@@ -128,17 +134,22 @@ impl super::KomodoResource for Builder {
resource: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
// remove the builder from any attached builds
db_client()
.builds
.update_many(
doc! { "config.builder.params.builder_id": &resource.id },
mungos::update::Update::Set(
doc! { "config.builder.params.builder_id": "" },
),
doc! { "config.builder_id": &resource.id },
mungos::update::Update::Set(doc! { "config.builder_id": "" }),
)
.await
.context("failed to update_many builds on database")?;
db_client()
.repos
.update_many(
doc! { "config.builder_id": &resource.id },
mungos::update::Update::Set(doc! { "config.builder_id": "" }),
)
.await
.context("failed to update_many repos on database")?;
Ok(())
}

View File

@@ -26,7 +26,6 @@ use crate::{
query::get_deployment_state,
},
monitor::update_cache_for_server,
resource,
state::{action_states, db_client, deployment_status_cache},
};
@@ -44,8 +43,8 @@ impl super::KomodoResource for Deployment {
ResourceTargetVariant::Deployment
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().deployments
}
@@ -132,11 +131,21 @@ impl super::KomodoResource for Deployment {
created: &Resource<Self::Config, Self::Info>,
_update: &mut Update,
) -> anyhow::Result<()> {
if !created.config.server_id.is_empty() {
let server =
resource::get::<Server>(&created.config.server_id).await?;
update_cache_for_server(&server).await;
if created.config.server_id.is_empty() {
return Ok(());
}
let Ok(server) = super::get::<Server>(&created.config.server_id)
.await
.inspect_err(|e| {
warn!(
"Failed to get Server for Deployment {} | {e:#}",
created.name
)
})
else {
return Ok(());
};
update_cache_for_server(&server).await;
Ok(())
}
@@ -156,14 +165,15 @@ impl super::KomodoResource for Deployment {
async fn post_update(
updated: &Self,
_update: &mut Update,
update: &mut Update,
) -> anyhow::Result<()> {
if !updated.config.server_id.is_empty() {
let server =
resource::get::<Server>(&updated.config.server_id).await?;
update_cache_for_server(&server).await;
}
Ok(())
Self::post_create(updated, update).await
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameDeployment
}
// DELETE
@@ -262,9 +272,13 @@ async fn validate_config(
) -> anyhow::Result<()> {
if let Some(server_id) = &config.server_id {
if !server_id.is_empty() {
let server = get_check_permissions::<Server>(server_id, user, PermissionLevel::Write)
.await
.context("cannot create deployment on this server. user must have update permissions on the server to perform this action.")?;
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Write,
)
.await
.context("Cannot attach Deployment to this Server")?;
config.server_id = Some(server.id);
}
}
@@ -272,9 +286,15 @@ async fn validate_config(
&config.image
{
if !build_id.is_empty() {
let build = get_check_permissions::<Build>(build_id, user, PermissionLevel::Read)
.await
.context("cannot create deployment with this build attached. user must have at least read permissions on the build to perform this action.")?;
let build = get_check_permissions::<Build>(
build_id,
user,
PermissionLevel::Read,
)
.await
.context(
"Cannot update deployment with this build attached.",
)?;
config.image = Some(DeploymentImage::Build {
build_id: build.id,
version: *version,

View File

@@ -45,6 +45,7 @@ use crate::{
state::{db_client, State},
};
mod action;
mod alerter;
mod build;
mod builder;
@@ -57,6 +58,9 @@ mod server_template;
mod stack;
mod sync;
pub use action::{
refresh_action_state_cache, spawn_action_state_refresh_loop,
};
pub use build::{
refresh_build_state_cache, spawn_build_state_refresh_loop,
};
@@ -106,8 +110,7 @@ pub trait KomodoResource {
fn resource_type() -> ResourceTargetVariant;
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>>;
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>;
async fn to_list_item(
resource: Resource<Self::Config, Self::Info>,
@@ -165,6 +168,12 @@ pub trait KomodoResource {
update: &mut Update,
) -> anyhow::Result<()>;
// =======
// RENAME
// =======
fn rename_operation() -> Operation;
// =======
// DELETE
// =======
@@ -195,7 +204,6 @@ pub async fn get<T: KomodoResource>(
id_or_name: &str,
) -> anyhow::Result<Resource<T::Config, T::Info>> {
T::coll()
.await
.find_one(id_or_name_filter(id_or_name))
.await
.context("failed to query db for resource")?
@@ -228,7 +236,7 @@ pub async fn get_check_permissions<T: KomodoResource>(
Ok(resource)
} else {
Err(anyhow!(
"user does not have required permissions on this {}",
"User does not have required permissions on this {}. Must have at least {permission_level} permissions",
T::resource_type()
))
}
@@ -240,9 +248,24 @@ pub async fn get_check_permissions<T: KomodoResource>(
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_user<T: KomodoResource>(
pub async fn get_resource_object_ids_for_user<T: KomodoResource>(
user: &User,
) -> anyhow::Result<Option<Vec<ObjectId>>> {
get_resource_ids_for_user::<T>(user).await.map(|ids| {
ids.map(|ids| {
ids
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect()
})
})
}
/// Returns None if still no need to filter by resource id (eg transparent mode, group membership with all access).
#[instrument(level = "debug")]
pub async fn get_resource_ids_for_user<T: KomodoResource>(
user: &User,
) -> anyhow::Result<Option<Vec<String>>> {
// Check admin or transparent mode
if user.admin || core_config().transparent_mode {
return Ok(None);
@@ -270,8 +293,8 @@ pub async fn get_resource_ids_for_user<T: KomodoResource>(
let (base, perms) = tokio::try_join!(
// Get any resources with non-none base permission,
find_collect(
T::coll().await,
doc! { "base_permission": { "$ne": "None" } },
T::coll(),
doc! { "base_permission": { "$exists": true, "$ne": "None" } },
None,
)
.map(|res| res.with_context(|| format!(
@@ -283,7 +306,7 @@ pub async fn get_resource_ids_for_user<T: KomodoResource>(
doc! {
"$or": user_target_query(&user.id, &groups)?,
"resource_target.type": resource_type.as_ref(),
"level": { "$in": ["Read", "Execute", "Write"] }
"level": { "$exists": true, "$ne": "None" }
},
None,
)
@@ -297,9 +320,6 @@ pub async fn get_resource_ids_for_user<T: KomodoResource>(
// Chain in the ones with non-None base permissions
.chain(base.into_iter().map(|res| res.id))
// collect into hashset first to remove any duplicates
.collect::<HashSet<_>>()
.into_iter()
.flat_map(|id| ObjectId::from_str(&id))
.collect::<HashSet<_>>();
Ok(Some(ids.into_iter().collect()))
@@ -382,8 +402,9 @@ pub async fn get_user_permission_on_resource<T: KomodoResource>(
pub async fn list_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
all_tags: &[Tag],
) -> anyhow::Result<Vec<T::ListItem>> {
validate_resource_query_tags(&mut query).await;
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_for_user_using_document::<T>(filters, user).await
@@ -404,8 +425,9 @@ pub async fn list_for_user_using_document<T: KomodoResource>(
pub async fn list_full_for_user<T: KomodoResource>(
mut query: ResourceQuery<T::QuerySpecifics>,
user: &User,
all_tags: &[Tag],
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
validate_resource_query_tags(&mut query).await;
validate_resource_query_tags(&mut query, all_tags)?;
let mut filters = Document::new();
query.add_filters(&mut filters);
list_full_for_user_using_document::<T>(filters, user).await
@@ -416,11 +438,13 @@ pub async fn list_full_for_user_using_document<T: KomodoResource>(
mut filters: Document,
user: &User,
) -> anyhow::Result<Vec<Resource<T::Config, T::Info>>> {
if let Some(ids) = get_resource_ids_for_user::<T>(user).await? {
if let Some(ids) =
get_resource_object_ids_for_user::<T>(user).await?
{
filters.insert("_id", doc! { "$in": ids });
}
find_collect(
T::coll().await,
T::coll(),
filters,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
@@ -443,7 +467,7 @@ pub async fn get_id_to_resource_map<T: KomodoResource>(
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> anyhow::Result<IdResourceMap<T>> {
let res = find_collect(T::coll().await, None, None)
let res = find_collect(T::coll(), None, None)
.await
.with_context(|| {
format!("failed to pull {}s from mongo", T::resource_type())
@@ -510,7 +534,7 @@ pub async fn create<T: KomodoResource>(
// Ensure an existing resource with same name doesn't already exist
// The database indexing also ensures this but doesn't give a good error message.
if list_full_for_user::<T>(Default::default(), system_user())
if list_full_for_user::<T>(Default::default(), system_user(), &[])
.await
.context("Failed to list all resources for duplicate name check")?
.into_iter()
@@ -535,7 +559,6 @@ pub async fn create<T: KomodoResource>(
};
let resource_id = T::coll()
.await
.insert_one(&resource)
.await
.with_context(|| {
@@ -603,7 +626,7 @@ pub async fn update<T: KomodoResource>(
let diff = resource.config.partial_diff(config);
if diff.is_none() {
return Err(anyhow!("update has no changes"));
return Ok(resource);
}
let mut diff_log = String::from("diff");
@@ -624,14 +647,9 @@ pub async fn update<T: KomodoResource>(
let update_doc = flatten_document(doc! { "config": config_doc });
update_one_by_id(
T::coll().await,
&id,
doc! { "$set": update_doc },
None,
)
.await
.context("failed to update resource on database")?;
update_one_by_id(T::coll(), &id, doc! { "$set": update_doc }, None)
.await
.context("failed to update resource on database")?;
let mut update = make_update(
resource_target::<T>(id),
@@ -671,6 +689,7 @@ fn resource_target<T: KomodoResource>(id: String) -> ResourceTarget {
ResourceTarget::ResourceSync(id)
}
ResourceTargetVariant::Stack => ResourceTarget::Stack(id),
ResourceTargetVariant::Action => ResourceTarget::Action(id),
}
}
@@ -686,7 +705,6 @@ pub async fn update_description<T: KomodoResource>(
)
.await?;
T::coll()
.await
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "description": description } },
@@ -720,7 +738,6 @@ pub async fn update_tags<T: KomodoResource>(
.flatten()
.collect::<Vec<_>>();
T::coll()
.await
.update_one(
id_or_name_filter(id_or_name),
doc! { "$set": { "tags": tags } },
@@ -733,13 +750,67 @@ pub async fn remove_tag_from_all<T: KomodoResource>(
tag_id: &str,
) -> anyhow::Result<()> {
T::coll()
.await
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } })
.await
.context("failed to remove tag from resources")?;
Ok(())
}
// =======
// RENAME
// =======
pub async fn rename<T: KomodoResource>(
id_or_name: &str,
name: &str,
user: &User,
) -> anyhow::Result<Update> {
let resource = get_check_permissions::<T>(
id_or_name,
user,
PermissionLevel::Write,
)
.await?;
let mut update = make_update(
resource_target::<T>(resource.id.clone()),
T::rename_operation(),
user,
);
let name = to_komodo_name(name);
update_one_by_id(
T::coll(),
&resource.id,
mungos::update::Update::Set(
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
)
.await
.with_context(|| {
format!(
"Failed to update {ty} on db. This name may already be taken.",
ty = T::resource_type()
)
})?;
update.push_simple_log(
&format!("Rename {}", T::resource_type()),
format!(
"Renamed {ty} {id} from {prev_name} to {name}",
ty = T::resource_type(),
id = resource.id,
prev_name = resource.name
),
);
update.finalize();
update.id = add_update(update.clone()).await?;
Ok(update)
}
// =======
// DELETE
// =======
@@ -769,7 +840,7 @@ pub async fn delete<T: KomodoResource>(
delete_all_permissions_on_resource(target.clone()).await;
remove_from_recently_viewed(target.clone()).await;
delete_one_by_id(T::coll().await, &resource.id, None)
delete_one_by_id(T::coll(), &resource.id, None)
.await
.with_context(|| {
format!("failed to delete {} from database", T::resource_type())
@@ -793,14 +864,24 @@ pub async fn delete<T: KomodoResource>(
// =======
#[instrument(level = "debug")]
pub async fn validate_resource_query_tags<
T: Default + std::fmt::Debug,
>(
pub fn validate_resource_query_tags<T: Default + std::fmt::Debug>(
query: &mut ResourceQuery<T>,
) {
let futures = query.tags.iter().map(|tag| get_tag(tag));
let res = join_all(futures).await;
query.tags = res.into_iter().flatten().map(|tag| tag.id).collect();
all_tags: &[Tag],
) -> anyhow::Result<()> {
query.tags = query
.tags
.iter()
.map(|tag| {
all_tags
.iter()
.find(|t| t.name == *tag || t.id == *tag)
.map(|tag| tag.id.clone())
.with_context(|| {
format!("No tag found matching name or id: {}", tag)
})
})
.collect::<anyhow::Result<Vec<_>>>()?;
Ok(())
}
#[instrument]
@@ -834,6 +915,7 @@ where
ResourceTarget::Build(id) => ("recents.Build", id),
ResourceTarget::Repo(id) => ("recents.Repo", id),
ResourceTarget::Procedure(id) => ("recents.Procedure", id),
ResourceTarget::Action(id) => ("recents.Action", id),
ResourceTarget::Stack(id) => ("recents.Stack", id),
ResourceTarget::Builder(id) => ("recents.Builder", id),
ResourceTarget::Alerter(id) => ("recents.Alerter", id),

View File

@@ -4,6 +4,7 @@ use anyhow::{anyhow, Context};
use komodo_client::{
api::execute::Execution,
entities::{
action::Action,
build::Build,
deployment::Deployment,
permission::PermissionLevel,
@@ -44,8 +45,8 @@ impl super::KomodoResource for Procedure {
ResourceTargetVariant::Procedure
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().procedures
}
@@ -114,11 +115,16 @@ impl super::KomodoResource for Procedure {
}
async fn post_update(
_updated: &Self,
_update: &mut Update,
updated: &Self,
update: &mut Update,
) -> anyhow::Result<()> {
refresh_procedure_state_cache().await;
Ok(())
Self::post_create(updated, update).await
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameProcedure
}
// DELETE
@@ -172,6 +178,15 @@ async fn validate_config(
}
params.procedure = procedure.id;
}
Execution::RunAction(params) => {
let action = super::get_check_permissions::<Action>(
&params.action,
user,
PermissionLevel::Execute,
)
.await?;
params.action = action.id;
}
Execution::RunBuild(params) => {
let build = super::get_check_permissions::<Build>(
&params.build,
@@ -494,6 +509,16 @@ async fn validate_config(
.await?;
params.sync = sync.id;
}
Execution::CommitSync(params) => {
// This one is actually a write operation.
let sync = super::get_check_permissions::<ResourceSync>(
&params.sync,
user,
PermissionLevel::Write,
)
.await?;
params.sync = sync.id;
}
Execution::DeployStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -503,6 +528,15 @@ async fn validate_config(
.await?;
params.stack = stack.id;
}
Execution::DeployStackIfChanged(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
user,
PermissionLevel::Execute,
)
.await?;
params.stack = stack.id;
}
Execution::StartStack(params) => {
let stack = super::get_check_permissions::<Stack>(
&params.stack,
@@ -579,7 +613,7 @@ pub async fn refresh_procedure_state_cache() {
let procedures =
find_collect(&db_client().procedures, None, None)
.await
.context("failed to get procedures from db")?;
.context("Failed to get Procedures from db")?;
let cache = procedure_state_cache();
for procedure in procedures {
let state = get_procedure_state_from_db(&procedure.id).await;
@@ -589,7 +623,7 @@ pub async fn refresh_procedure_state_cache() {
}
.await
.inspect_err(|e| {
error!("failed to refresh build state cache | {e:#}")
error!("Failed to refresh Procedure state cache | {e:#}")
});
}
@@ -636,7 +670,7 @@ async fn get_procedure_state_from_db(id: &str) -> ProcedureState {
}
.await
.inspect_err(|e| {
warn!("failed to get procedure state for {id} | {e:#}")
warn!("Failed to get Procedure state for {id} | {e:#}")
})
.unwrap_or(ProcedureState::Unknown)
}

View File

@@ -11,6 +11,7 @@ use komodo_client::entities::{
},
resource::Resource,
server::Server,
to_komodo_name,
update::Update,
user::User,
Operation, ResourceTargetVariant,
@@ -43,8 +44,8 @@ impl super::KomodoResource for Repo {
ResourceTargetVariant::Repo
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().repos
}
@@ -132,6 +133,12 @@ impl super::KomodoResource for Repo {
Ok(())
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameRepo
}
// DELETE
fn delete_operation() -> Operation {
@@ -158,7 +165,11 @@ impl super::KomodoResource for Repo {
match periphery
.request(DeleteRepo {
name: repo.name.clone(),
name: if repo.config.path.is_empty() {
to_komodo_name(&repo.name)
} else {
repo.config.path.clone()
},
})
.await
{
@@ -213,15 +224,19 @@ async fn validate_config(
PermissionLevel::Write,
)
.await
.context("Cannot attach repo to this server. User must have write permissions on the server.")?;
.context("Cannot attach Repo to this Server")?;
config.server_id = Some(server.id);
}
}
if let Some(builder_id) = &config.builder_id {
if !builder_id.is_empty() {
let builder = super::get_check_permissions::<Builder>(builder_id, user, PermissionLevel::Read)
.await
.context("Cannot attach repo to this builder. User must have at least read permissions on the builder.")?;
let builder = super::get_check_permissions::<Builder>(
builder_id,
user,
PermissionLevel::Read,
)
.await
.context("Cannot attach Repo to this Builder")?;
config.builder_id = Some(builder.id);
}
}

View File

@@ -30,8 +30,8 @@ impl super::KomodoResource for Server {
ResourceTargetVariant::Server
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().servers
}
@@ -115,6 +115,12 @@ impl super::KomodoResource for Server {
Ok(())
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameServer
}
// DELETE
fn delete_operation() -> Operation {

View File

@@ -29,8 +29,8 @@ impl super::KomodoResource for ServerTemplate {
ResourceTargetVariant::ServerTemplate
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().server_templates
}
@@ -117,6 +117,12 @@ impl super::KomodoResource for ServerTemplate {
Ok(())
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameServerTemplate
}
// DELETE
fn delete_operation() -> Operation {

View File

@@ -44,8 +44,8 @@ impl super::KomodoResource for Stack {
ResourceTargetVariant::Stack
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().stacks
}
@@ -172,7 +172,7 @@ impl super::KomodoResource for Stack {
.await
.inspect_err(|e| {
warn!(
"Failed to get server for stack {} | {e:#}",
"Failed to get Server for Stack {} | {e:#}",
created.name
)
})
@@ -204,6 +204,12 @@ impl super::KomodoResource for Stack {
Self::post_create(updated, update).await
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameStack
}
// DELETE
fn delete_operation() -> Operation {
@@ -301,9 +307,13 @@ async fn validate_config(
) -> anyhow::Result<()> {
if let Some(server_id) = &config.server_id {
if !server_id.is_empty() {
let server = get_check_permissions::<Server>(server_id, user, PermissionLevel::Write)
.await
.context("cannot create stack on this server. user must have update permissions on the server to perform this action.")?;
let server = get_check_permissions::<Server>(
server_id,
user,
PermissionLevel::Write,
)
.await
.context("Cannot attach stack to this Server")?;
// in case it comes in as name
config.server_id = Some(server.id);
}

View File

@@ -41,8 +41,8 @@ impl super::KomodoResource for ResourceSync {
ResourceTargetVariant::ResourceSync
}
async fn coll(
) -> &'static Collection<Resource<Self::Config, Self::Info>> {
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
{
&db_client().resource_syncs
}
@@ -117,6 +117,7 @@ impl super::KomodoResource for ResourceSync {
format_serror(&e.context("The sync pending cache has failed to refresh. This is likely due to a misconfiguration of the sync").into())
);
};
refresh_resource_sync_state_cache().await;
Ok(())
}
@@ -141,6 +142,12 @@ impl super::KomodoResource for ResourceSync {
Self::post_create(updated, update).await
}
// RENAME
fn rename_operation() -> Operation {
Operation::RenameResourceSync
}
// DELETE
fn delete_operation() -> Operation {

View File

@@ -5,6 +5,7 @@ use std::{
use anyhow::Context;
use komodo_client::entities::{
action::ActionState,
build::BuildState,
config::core::{CoreConfig, GithubWebhookAppConfig},
deployment::DeploymentState,
@@ -191,6 +192,14 @@ pub fn procedure_state_cache() -> &'static ProcedureStateCache {
PROCEDURE_STATE_CACHE.get_or_init(Default::default)
}
pub type ActionStateCache = Cache<String, ActionState>;
pub fn action_state_cache() -> &'static ActionStateCache {
static ACTION_STATE_CACHE: OnceLock<ActionStateCache> =
OnceLock::new();
ACTION_STATE_CACHE.get_or_init(Default::default)
}
pub type ResourceSyncStateCache = Cache<String, ResourceSyncState>;
pub fn resource_sync_state_cache() -> &'static ResourceSyncStateCache

View File

@@ -18,7 +18,7 @@ use komodo_client::{
toml::ResourceToml,
update::Log,
user::sync_user,
ResourceTarget,
FileContents, ResourceTarget,
},
};
use resolver_api::Resolve;
@@ -26,7 +26,6 @@ use resolver_api::Resolve;
use crate::{
api::execute::ExecuteRequest,
helpers::update::init_execution_update,
stack::remote::ensure_remote_repo,
state::{deployment_status_cache, stack_status_cache, State},
};
@@ -541,7 +540,45 @@ fn build_cache_for_stack<'a>(
StackState::Running => {
// Here can diff the changes, to see if they merit a redeploy.
// First merge toml resource config (partial) onto default resource config.
// See if any remote contents don't match deployed contents
match (
&original.info.deployed_contents,
&original.info.remote_contents,
) {
(Some(deployed_contents), Some(remote_contents)) => {
for FileContents { path, contents } in remote_contents {
if let Some(deployed) =
deployed_contents.iter().find(|c| &c.path == path)
{
if &deployed.contents != contents {
cache.insert(
target,
Some((
format!(
"File contents for {path} have changed"
),
after,
)),
);
return Ok(());
}
} else {
cache.insert(
target,
Some((
format!("New file contents at {path}"),
after,
)),
);
return Ok(());
}
}
}
// Maybe should handle other cases
_ => {}
}
// Merge toml resource config (partial) onto default resource config.
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
let config: StackConfig = stack.config.clone().into();
let mut config: PartialStackConfig = config.into();
@@ -589,40 +626,6 @@ fn build_cache_for_stack<'a>(
}
};
// We know the config hasn't changed at this point, but still need
// to check if its a repo based stack, and the hash has updated.
// Can use 'original' for this (config hasn't changed)
if stack.latest_hash {
if let Some(deployed_hash) = &original.info.deployed_hash {
let (_, _, hash, _) = ensure_remote_repo(original.into())
.await
.context("failed to get latest hash for repo based stack")
.with_context(|| {
format!(
"Stack {} {}",
bold(&stack.name),
colored("has errors", Color::Red)
)
})?;
if let Some(hash) = hash {
if &hash != deployed_hash {
cache.insert(
target,
Some((
format!(
"outdated hash. deployed: {} -> latest: {}",
colored(deployed_hash, Color::Red),
colored(hash, Color::Green)
),
after,
)),
);
return Ok(());
}
}
}
}
// Check 'after' to see if they deploy.
insert_target_using_after_list(
target,

View File

@@ -6,6 +6,7 @@ use komodo_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
tag::Tag, toml::ResourceToml, update::Log, user::sync_user,
ResourceTargetVariant,
},
};
use mungos::find::find_collect;
@@ -26,20 +27,42 @@ pub async fn get_updates_for_execution<
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
let map = find_collect(Resource::coll().await, None, None)
let map = find_collect(Resource::coll(), None, None)
.await
.context("failed to get resources from db")?
.into_iter()
.filter(|r| {
Resource::include_resource(
&r.config, &r.tags, id_to_tags, match_tags,
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let resources = resources
.into_iter()
.filter(|r| {
Resource::include_resource_partial(
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.collect::<Vec<_>>();
let mut to_create = ToCreate::<Resource::PartialConfig>::new();
let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
@@ -54,15 +77,6 @@ pub async fn get_updates_for_execution<
}
for mut resource in resources {
// only resource that might not be included is resource sync
if !Resource::include_resource_partial(
&resource.config,
&resource.tags,
id_to_tags,
match_tags,
) {
continue;
}
match map.get(&resource.name) {
Some(original) => {
// First merge toml resource config (partial) onto default resource config.

View File

@@ -1,91 +1,217 @@
use std::{fs, path::Path};
use std::{
fs,
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, muted, Color};
use formatting::{bold, colored, format_serror, muted, Color};
use komodo_client::entities::{
sync::SyncFileContents,
toml::{ResourceToml, ResourcesToml},
update::Log,
FileContents,
};
pub fn read_resources(
path: &Path,
root_path: &Path,
resource_path: &[String],
match_tags: &[String],
logs: &mut Vec<Log>,
files: &mut Vec<FileContents>,
file_errors: &mut Vec<FileContents>,
files: &mut Vec<SyncFileContents>,
file_errors: &mut Vec<SyncFileContents>,
) -> anyhow::Result<ResourcesToml> {
let mut res = ResourcesToml::default();
let mut log =
format!("{}: reading resources from {path:?}", muted("INFO"));
if let Err(e) = read_resources_recursive(
path,
match_tags,
&mut res,
&mut log,
files,
file_errors,
)
.with_context(|| format!("failed to read resources from {path:?}"))
{
file_errors.push(FileContents {
path: path.display().to_string(),
contents: format_serror(&e.into()),
});
logs.push(Log::error("read remote resources", log));
} else {
logs.push(Log::simple("read remote resources", log));
};
Ok(res)
let mut resources = ResourcesToml::default();
for resource_path in resource_path {
let resource_path = resource_path
.parse::<PathBuf>()
.context("Invalid resource path")?;
let full_path = root_path
.join(&resource_path)
.components()
.collect::<PathBuf>();
let mut log = format!(
"{}: reading resources from {full_path:?}",
muted("INFO")
);
if full_path.is_file() {
if let Err(e) = read_resource_file(
root_path,
None,
&resource_path,
match_tags,
&mut resources,
&mut log,
files,
)
.with_context(|| {
format!("failed to read resources from {full_path:?}")
}) {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(&e.into()),
});
logs.push(Log::error("Read remote resources", log));
} else {
logs.push(Log::simple("Read remote resources", log));
};
} else if full_path.is_dir() {
if let Err(e) = read_resources_directory(
root_path,
&resource_path,
&PathBuf::new(),
match_tags,
&mut resources,
&mut log,
files,
file_errors,
)
.with_context(|| {
format!("Failed to read resources from {full_path:?}")
}) {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(&e.into()),
});
logs.push(Log::error("Read remote resources", log));
} else {
logs.push(Log::simple("Read remote resources", log));
};
} else if !full_path.exists() {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(
&anyhow!("Initialize the file to proceed.")
.context(format!("Path {full_path:?} does not exist."))
.into(),
),
});
log.push_str(&format!(
"{}: Resoure path {} does not exist.",
colored("ERROR", Color::Red),
bold(resource_path.display())
));
logs.push(Log::error("Read remote resources", log));
} else {
log.push_str(&format!(
"{}: Resoure path {} exists, but is neither a file nor a directory.",
colored("WARN", Color::Red),
bold(resource_path.display())
));
logs.push(Log::error("Read remote resources", log));
}
}
Ok(resources)
}
fn read_resources_recursive(
path: &Path,
/// Use when incoming resource path is a file.
fn read_resource_file(
root_path: &Path,
// relative to root path.
resource_path: Option<&Path>,
// relative to resource path if provided, or root path.
file_path: &Path,
match_tags: &[String],
resources: &mut ResourcesToml,
log: &mut String,
files: &mut Vec<FileContents>,
file_errors: &mut Vec<FileContents>,
files: &mut Vec<SyncFileContents>,
) -> anyhow::Result<()> {
let res =
fs::metadata(path).context("failed to get path metadata")?;
if res.is_file() {
if !path
.extension()
.map(|ext| ext == "toml")
.unwrap_or_default()
{
return Ok(());
}
let contents = std::fs::read_to_string(path)
.context("failed to read file contents")?;
files.push(FileContents {
path: path.display().to_string(),
contents: contents.clone(),
});
let more = toml::from_str::<ResourcesToml>(&contents)
// the error without this comes through with multiple lines (\n) and looks bad
.map_err(|e| anyhow!("{e:#}"))
.context("failed to parse resource file contents")?;
let full_path = if let Some(resource_path) = resource_path {
root_path.join(resource_path).join(file_path)
} else {
root_path.join(file_path)
};
if !full_path
.extension()
.map(|ext| ext == "toml")
.unwrap_or_default()
{
return Ok(());
}
let contents = std::fs::read_to_string(&full_path)
.context("failed to read file contents")?;
log.push('\n');
log.push_str(&format!(
"{}: {} from {}",
muted("INFO"),
colored("adding resources", Color::Green),
colored(path.display(), Color::Blue)
));
files.push(SyncFileContents {
resource_path: resource_path
.map(|path| path.display().to_string())
.unwrap_or_default(),
path: file_path.display().to_string(),
contents: contents.clone(),
});
let more = toml::from_str::<ResourcesToml>(&contents)
// the error without this comes through with multiple lines (\n) and looks bad
.map_err(|e| anyhow!("{e:#}"))
.context("failed to parse resource file contents")?;
log.push('\n');
let path_for_view =
if let Some(resource_path) = resource_path.as_ref() {
resource_path.join(file_path)
} else {
file_path.to_path_buf()
};
log.push_str(&format!(
"{}: {} from {}",
muted("INFO"),
colored("adding resources", Color::Green),
colored(path_for_view.display(), Color::Blue)
));
extend_resources(resources, more, match_tags);
extend_resources(resources, more, match_tags);
Ok(())
} else if res.is_dir() {
let directory = fs::read_dir(path)
.context("failed to read directory contents")?;
for entry in directory.into_iter().flatten() {
let path = entry.path();
if let Err(e) = read_resources_recursive(
&path,
Ok(())
}
/// Reads down into directories.
fn read_resources_directory(
root_path: &Path,
// relative to root path.
resource_path: &Path,
// relative to resource path. start as empty path
curr_path: &Path,
match_tags: &[String],
resources: &mut ResourcesToml,
log: &mut String,
files: &mut Vec<SyncFileContents>,
file_errors: &mut Vec<SyncFileContents>,
) -> anyhow::Result<()> {
let full_resource_path = root_path.join(resource_path);
let full_path = full_resource_path.join(curr_path);
let directory = fs::read_dir(&full_path).with_context(|| {
format!("Failed to read directory contents at {full_path:?}")
})?;
for entry in directory.into_iter().flatten() {
let path = entry.path();
let curr_path =
path.strip_prefix(&full_resource_path).unwrap_or(&path);
if path.is_file() {
if let Err(e) = read_resource_file(
root_path,
Some(resource_path),
curr_path,
match_tags,
resources,
log,
files,
)
.with_context(|| {
format!("failed to read resources from {full_path:?}")
}) {
file_errors.push(SyncFileContents {
resource_path: String::new(),
path: resource_path.display().to_string(),
contents: format_serror(&e.into()),
});
};
} else if path.is_dir() {
if let Err(e) = read_resources_directory(
root_path,
resource_path,
curr_path,
match_tags,
resources,
log,
@@ -95,8 +221,9 @@ fn read_resources_recursive(
.with_context(|| {
format!("failed to read resources from {path:?}")
}) {
file_errors.push(FileContents {
path: path.display().to_string(),
file_errors.push(SyncFileContents {
resource_path: resource_path.display().to_string(),
path: curr_path.display().to_string(),
contents: format_serror(&e.into()),
});
log.push('\n');
@@ -108,10 +235,8 @@ fn read_resources_recursive(
));
}
}
Ok(())
} else {
Err(anyhow!("resources path is neither file nor directory"))
}
Ok(())
}
pub fn extend_resources(
@@ -137,6 +262,9 @@ pub fn extend_resources(
resources
.procedures
.extend(filter_by_tag(more.procedures, match_tags));
resources
.actions
.extend(filter_by_tag(more.actions, match_tags));
resources
.alerters
.extend(filter_by_tag(more.alerters, match_tags));

View File

@@ -1,14 +1,17 @@
use std::{collections::HashMap, str::FromStr};
use komodo_client::entities::{
alerter::Alerter, build::Build, builder::Builder,
action::Action, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, toml::ResourceToml, ResourceTarget,
ResourceTargetVariant,
};
use mungos::mongodb::bson::oid::ObjectId;
use toml::ToToml;
use crate::resource::KomodoResource;
pub mod deploy;
pub mod execute;
pub mod file;
@@ -38,22 +41,44 @@ pub trait ResourceSyncTrait: ToToml + Sized {
/// To exclude resource syncs with "file_contents" (they aren't compatible)
fn include_resource(
name: &String,
_config: &Self::Config,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
include_resource_by_tags(resource_tags, id_to_tags, match_tags)
include_resource_by_resource_type_and_name::<Self>(
match_resource_type,
match_resources,
name,
) && include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
)
}
/// To exclude resource syncs with "file_contents" (they aren't compatible)
fn include_resource_partial(
name: &String,
_config: &Self::PartialConfig,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
include_resource_by_tags(resource_tags, id_to_tags, match_tags)
include_resource_by_resource_type_and_name::<Self>(
match_resource_type,
match_resources,
name,
) && include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
)
}
/// Apply any changes to incoming toml partial config
@@ -90,6 +115,31 @@ pub fn include_resource_by_tags(
match_tags.iter().all(|tag| tag_names.contains(&tag))
}
pub fn include_resource_by_resource_type_and_name<
T: KomodoResource,
>(
resource_type: Option<ResourceTargetVariant>,
resources: Option<&[String]>,
name: &String,
) -> bool {
match (resource_type, resources) {
(Some(resource_type), Some(resources)) => {
if T::resource_type() != resource_type {
return false;
}
resources.contains(name)
}
(Some(resource_type), None) => {
if T::resource_type() != resource_type {
return false;
}
true
}
(None, Some(resources)) => resources.contains(name),
(None, None) => true,
}
}
pub struct AllResourcesById {
pub servers: HashMap<String, Server>,
pub deployments: HashMap<String, Deployment>,
@@ -97,6 +147,7 @@ pub struct AllResourcesById {
pub builds: HashMap<String, Build>,
pub repos: HashMap<String, Repo>,
pub procedures: HashMap<String, Procedure>,
pub actions: HashMap<String, Action>,
pub builders: HashMap<String, Builder>,
pub alerters: HashMap<String, Alerter>,
pub templates: HashMap<String, ServerTemplate>,
@@ -131,6 +182,10 @@ impl AllResourcesById {
id_to_tags, match_tags,
)
.await?,
actions: crate::resource::get_id_to_resource_map::<Action>(
id_to_tags, match_tags,
)
.await?,
builders: crate::resource::get_id_to_resource_map::<Builder>(
id_to_tags, match_tags,
)

View File

@@ -1,10 +1,11 @@
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use git::GitRes;
use komodo_client::entities::{
sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs,
FileContents,
sync::{ResourceSync, SyncFileContents},
to_komodo_name,
toml::ResourcesToml,
update::Log,
CloneArgs,
};
use crate::{config::core_config, helpers::git_token};
@@ -13,8 +14,8 @@ use super::file::extend_resources;
pub struct RemoteResources {
pub resources: anyhow::Result<ResourcesToml>,
pub files: Vec<FileContents>,
pub file_errors: Vec<FileContents>,
pub files: Vec<SyncFileContents>,
pub file_errors: Vec<SyncFileContents>,
pub logs: Vec<Log>,
pub hash: Option<String>,
pub message: Option<String>,
@@ -28,15 +29,14 @@ pub async fn get_remote_resources(
// =============
// FILES ON HOST
// =============
let path = sync
.config
.resource_path
.parse::<PathBuf>()
.context("Resource path is not valid path")?;
let root_path = core_config()
.sync_directory
.join(to_komodo_name(&sync.name));
let (mut logs, mut files, mut file_errors) =
(Vec::new(), Vec::new(), Vec::new());
let resources = super::file::read_resources(
&path,
&root_path,
&sync.config.resource_path,
&sync.config.match_tags,
&mut logs,
&mut files,
@@ -50,9 +50,7 @@ pub async fn get_remote_resources(
hash: None,
message: None,
});
} else if sync.config.managed
|| !sync.config.file_contents.is_empty()
{
} else if sync.config.repo.is_empty() {
// ==========
// UI DEFINED
// ==========
@@ -72,10 +70,10 @@ pub async fn get_remote_resources(
Ok(resources)
};
// filter_by_
return Ok(RemoteResources {
resources,
files: vec![FileContents {
files: vec![SyncFileContents {
resource_path: String::new(),
path: "database file".to_string(),
contents: sync.config.file_contents.clone(),
}],
@@ -101,10 +99,10 @@ pub async fn get_remote_resources(
let access_token = if let Some(account) = &clone_args.account {
git_token(&clone_args.provider, account, |https| clone_args.https = https)
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
.await
.with_context(
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
)?
} else {
None
};
@@ -139,11 +137,10 @@ pub async fn get_remote_resources(
let message =
message.context("failed to get commit hash message")?;
let resource_path = repo_path.join(&sync.config.resource_path);
let (mut files, mut file_errors) = (Vec::new(), Vec::new());
let resources = super::file::read_resources(
&resource_path,
&repo_path,
&sync.config.resource_path,
&sync.config.match_tags,
&mut logs,
&mut files,

View File

@@ -4,6 +4,7 @@ use formatting::{bold, colored, muted, Color};
use komodo_client::{
api::execute::Execution,
entities::{
action::Action,
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig},
@@ -17,7 +18,7 @@ use komodo_client::{
tag::Tag,
update::Log,
user::sync_user,
ResourceTarget,
ResourceTarget, ResourceTargetVariant,
},
};
use partial_derive2::{MaybeNone, PartialDiff};
@@ -31,8 +32,10 @@ use crate::{
};
use super::{
execute::ExecuteResourceSync, include_resource_by_tags,
AllResourcesById, ResourceSyncTrait, ToCreate, ToDelete, ToUpdate,
execute::ExecuteResourceSync,
include_resource_by_resource_type_and_name,
include_resource_by_tags, AllResourcesById, ResourceSyncTrait,
ToCreate, ToDelete, ToUpdate,
};
impl ResourceSyncTrait for Server {
@@ -231,18 +234,41 @@ impl ResourceSyncTrait for ServerTemplate {
impl ExecuteResourceSync for ServerTemplate {}
impl ResourceSyncTrait for Action {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::Action(id)
}
fn get_diff(
original: Self::Config,
update: Self::PartialConfig,
_resources: &AllResourcesById,
) -> anyhow::Result<Self::ConfigDiff> {
Ok(original.partial_diff(update))
}
}
impl ExecuteResourceSync for Action {}
impl ResourceSyncTrait for ResourceSync {
fn resource_target(id: String) -> ResourceTarget {
ResourceTarget::ResourceSync(id)
}
fn include_resource(
name: &String,
config: &Self::Config,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
if !include_resource_by_tags(
if !include_resource_by_resource_type_and_name::<ResourceSync>(
match_resource_type,
match_resources,
name,
) || !include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
@@ -259,17 +285,24 @@ impl ResourceSyncTrait for ResourceSync {
}
// The file contents MUST be empty
contents_empty &&
// The sync must be files on host mode OR NOT managed
(config.files_on_host || !config.managed)
// The sync must be files on host mode OR git repo mode
(config.files_on_host || !config.repo.is_empty())
}
fn include_resource_partial(
name: &String,
config: &Self::PartialConfig,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
resource_tags: &[String],
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
) -> bool {
if !include_resource_by_tags(
if !include_resource_by_resource_type_and_name::<ResourceSync>(
match_resource_type,
match_resources,
name,
) || !include_resource_by_tags(
resource_tags,
id_to_tags,
match_tags,
@@ -291,8 +324,8 @@ impl ResourceSyncTrait for ResourceSync {
}
// The file contents MUST be empty
contents_empty &&
// The sync must be files on host mode OR NOT managed
(files_on_host || !config.managed.unwrap_or_default())
// The sync must be files on host mode OR git repo mode
(files_on_host || !config.repo.as_deref().unwrap_or_default().is_empty())
}
fn get_diff(
@@ -327,6 +360,13 @@ impl ResourceSyncTrait for Procedure {
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::RunAction(config) => {
config.action = resources
.actions
.get(&config.action)
.map(|p| p.name.clone())
.unwrap_or_default();
}
Execution::RunBuild(config) => {
config.build = resources
.builds
@@ -572,6 +612,13 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::CommitSync(config) => {
config.sync = resources
.syncs
.get(&config.sync)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::DeployStack(config) => {
config.stack = resources
.stacks
@@ -579,6 +626,13 @@ impl ResourceSyncTrait for Procedure {
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::DeployStackIfChanged(config) => {
config.stack = resources
.stacks
.get(&config.stack)
.map(|s| s.name.clone())
.unwrap_or_default();
}
Execution::StartStack(config) => {
config.stack = resources
.stacks
@@ -652,14 +706,14 @@ impl ExecuteResourceSync for Procedure {
{
has_error = true;
log.push_str(&format!(
"{}: failed to delete {} '{}' | {e:#}",
"\n{}: failed to delete {} '{}' | {e:#}",
colored("ERROR", Color::Red),
Self::resource_type(),
bold(&name),
))
} else {
log.push_str(&format!(
"{}: {} {} '{}'",
"\n{}: {} {} '{}'",
muted("INFO"),
colored("deleted", Color::Red),
Self::resource_type(),

View File

@@ -4,6 +4,7 @@ use anyhow::Context;
use komodo_client::{
api::execute::Execution,
entities::{
action::Action,
alerter::Alerter,
build::Build,
builder::{Builder, BuilderConfig, PartialBuilderConfig},
@@ -105,6 +106,8 @@ pub fn resource_toml_to_toml_string<R: ToToml>(
pub fn resource_push_to_toml<R: ToToml>(
mut resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
toml: &mut String,
all: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
@@ -116,7 +119,7 @@ pub fn resource_push_to_toml<R: ToToml>(
toml
.push_str(&format!("[[{}]]\n", R::resource_type().toml_header()));
R::push_to_toml_string(
convert_resource::<R>(resource, all_tags),
convert_resource::<R>(resource, deploy, after, all_tags),
toml,
)?;
Ok(())
@@ -124,16 +127,22 @@ pub fn resource_push_to_toml<R: ToToml>(
pub fn resource_to_toml<R: ToToml>(
resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
all: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
) -> anyhow::Result<String> {
let mut toml = String::new();
resource_push_to_toml::<R>(resource, &mut toml, all, all_tags)?;
resource_push_to_toml::<R>(
resource, deploy, after, &mut toml, all, all_tags,
)?;
Ok(toml)
}
pub fn convert_resource<R: KomodoResource>(
resource: Resource<R::Config, R::Info>,
deploy: bool,
after: Vec<String>,
all_tags: &HashMap<String, Tag>,
) -> ResourceToml<R::PartialConfig> {
ResourceToml {
@@ -144,9 +153,8 @@ pub fn convert_resource<R: KomodoResource>(
.filter_map(|t| all_tags.get(t).map(|t| t.name.clone()))
.collect(),
description: resource.description,
deploy: false,
after: Default::default(),
latest_hash: false,
deploy,
after,
// The config still needs to be minimized.
// This happens in ToToml::push_to_toml
config: resource.config.into(),
@@ -157,6 +165,7 @@ pub fn convert_resource<R: KomodoResource>(
impl ToToml for Alerter {}
impl ToToml for Server {}
impl ToToml for ResourceSync {}
impl ToToml for Action {}
impl ToToml for Stack {
fn replace_ids(
@@ -405,6 +414,13 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::RunAction(exec) => exec.action.clone_from(
all
.actions
.get(&exec.action)
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::RunBuild(exec) => exec.build.clone_from(
all
.builds
@@ -680,6 +696,13 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::CommitSync(exec) => exec.sync.clone_from(
all
.syncs
.get(&exec.sync)
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::DeployStack(exec) => exec.stack.clone_from(
all
.stacks
@@ -687,6 +710,15 @@ impl ToToml for Procedure {
.map(|r| &r.name)
.unwrap_or(&String::new()),
),
Execution::DeployStackIfChanged(exec) => {
exec.stack.clone_from(
all
.stacks
.get(&exec.stack)
.map(|r| &r.name)
.unwrap_or(&String::new()),
)
}
Execution::StartStack(exec) => exec.stack.clone_from(
all
.stacks

View File

@@ -15,7 +15,8 @@ use komodo_client::{
sync::DiffData,
toml::{PermissionToml, UserGroupToml},
update::Log,
user::sync_user,
user::{sync_user, User},
user_group::UserGroup,
ResourceTarget, ResourceTargetVariant,
},
};
@@ -43,17 +44,21 @@ pub async fn get_updates_for_view(
delete: bool,
all_resources: &AllResourcesById,
) -> anyhow::Result<Vec<DiffData>> {
let map = find_collect(&db_client().user_groups, None, None)
let _curr = find_collect(&db_client().user_groups, None, None)
.await
.context("failed to query db for UserGroups")?
.context("failed to query db for UserGroups")?;
let mut curr = Vec::with_capacity(_curr.capacity());
convert_user_groups(_curr.into_iter(), all_resources, &mut curr)
.await?;
let map = curr
.into_iter()
.map(|ug| (ug.name.clone(), ug))
.map(|ug| (ug.1.name.clone(), ug))
.collect::<HashMap<_, _>>();
let mut diffs = Vec::<DiffData>::new();
if delete {
for user_group in map.values() {
for (_id, user_group) in map.values() {
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
diffs.push(DiffData::Delete {
current: format!(
@@ -66,13 +71,6 @@ pub async fn get_updates_for_view(
}
}
let id_to_user = find_collect(&db_client().users, None, None)
.await
.context("failed to query db for Users")?
.into_iter()
.map(|user| (user.id.clone(), user))
.collect::<HashMap<_, _>>();
for mut user_group in user_groups {
user_group
.permissions
@@ -90,10 +88,14 @@ pub async fn get_updates_for_view(
)
})?;
let original = match map.get(&user_group.name).cloned() {
let (_original_id, original) = match map
.get(&user_group.name)
.cloned()
{
Some(original) => original,
None => {
diffs.push(DiffData::Create {
name: user_group.name.clone(),
proposed: format!(
"[[user_group]]\n{}",
toml_pretty::to_string(&user_group, TOML_PRETTY_OPTIONS)
@@ -103,121 +105,16 @@ pub async fn get_updates_for_view(
continue;
}
};
let mut original_users = original
.users
.clone()
.into_iter()
.filter_map(|user_id| {
id_to_user.get(&user_id).map(|u| u.username.clone())
})
.collect::<Vec<_>>();
let mut original_permissions = State
.resolve(
ListUserTargetPermissions {
user_target: UserTarget::UserGroup(original.id.clone()),
},
sync_user().to_owned(),
)
.await
.context("failed to query for existing UserGroup permissions")?
.into_iter()
.filter(|p| p.level > PermissionLevel::None)
.map(|mut p| {
// replace the ids with names
match &mut p.resource_target {
ResourceTarget::System(_) => {}
ResourceTarget::Build(id) => {
*id = all_resources
.builds
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Builder(id) => {
*id = all_resources
.builders
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Deployment(id) => {
*id = all_resources
.deployments
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Server(id) => {
*id = all_resources
.servers
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Repo(id) => {
*id = all_resources
.repos
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Alerter(id) => {
*id = all_resources
.alerters
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Procedure(id) => {
*id = all_resources
.procedures
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = all_resources
.templates
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ResourceSync(id) => {
*id = all_resources
.syncs
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Stack(id) => {
*id = all_resources
.stacks
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
}
PermissionToml {
target: p.resource_target,
level: p.level,
}
})
.collect::<Vec<_>>();
original_users.sort();
user_group.users.sort();
let all_diff = diff_group_all(&original.all, &user_group.all);
user_group.permissions.sort_by(sort_permissions);
original_permissions.sort_by(sort_permissions);
let update_users = user_group.users != original_users;
let update_users = user_group.users != original.users;
let update_all = !all_diff.is_empty();
let update_permissions =
user_group.permissions != original_permissions;
user_group.permissions != original.permissions;
// only add log after diff detected
if update_users || update_all || update_permissions {
@@ -378,6 +275,13 @@ pub async fn get_updates_for_execution(
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::Action(id) => {
*id = all_resources
.actions
.get(id)
.map(|b| b.name.clone())
.unwrap_or_default()
}
ResourceTarget::ServerTemplate(id) => {
*id = all_resources
.templates
@@ -819,6 +723,17 @@ async fn expand_user_group_permissions(
});
expanded.extend(permissions);
}
ResourceTargetVariant::Action => {
let permissions = all_resources
.actions
.values()
.filter(|resource| regex.is_match(&resource.name))
.map(|resource| PermissionToml {
target: ResourceTarget::Action(resource.name.clone()),
level: permission.level,
});
expanded.extend(permissions);
}
ResourceTargetVariant::ServerTemplate => {
let permissions = all_resources
.templates
@@ -899,3 +814,139 @@ fn diff_group_all(
to_update
}
/// Converts `UserGroup`s into their TOML-serializable form.
///
/// Resolves each permission's resource id to the resource's name and each
/// member user id to a username, then pushes `(user_group_id, UserGroupToml)`
/// pairs onto `res`. Users and permissions are sorted for stable output.
pub async fn convert_user_groups(
  user_groups: impl Iterator<Item = UserGroup>,
  all: &AllResourcesById,
  res: &mut Vec<(String, UserGroupToml)>,
) -> anyhow::Result<()> {
  let db = db_client();
  // Resolve user id -> username once up front for all groups.
  let usernames = find_collect(&db.users, None, None)
    .await?
    .into_iter()
    .map(|user| (user.id, user.username))
    .collect::<HashMap<_, _>>();

  // Replaces a resource id in place with the resource's name,
  // falling back to an empty string when the id is unknown.
  macro_rules! id_to_name {
    ($map:expr, $id:ident) => {
      *$id = $map.get($id).map(|r| r.name.clone()).unwrap_or_default()
    };
  }

  for user_group in user_groups {
    // ListUserTargetPermissions is admin only, but the caller has already
    // verified the requesting user can see this user group.
    let raw_permissions = State
      .resolve(
        ListUserTargetPermissions {
          user_target: UserTarget::UserGroup(user_group.id.clone()),
        },
        User {
          admin: true,
          ..Default::default()
        },
      )
      .await?;

    let mut permissions = Vec::with_capacity(raw_permissions.len());
    for mut permission in raw_permissions {
      match &mut permission.resource_target {
        ResourceTarget::Build(id) => id_to_name!(all.builds, id),
        ResourceTarget::Builder(id) => id_to_name!(all.builders, id),
        ResourceTarget::Deployment(id) => {
          id_to_name!(all.deployments, id)
        }
        ResourceTarget::Server(id) => id_to_name!(all.servers, id),
        ResourceTarget::Repo(id) => id_to_name!(all.repos, id),
        ResourceTarget::Alerter(id) => id_to_name!(all.alerters, id),
        ResourceTarget::Procedure(id) => {
          id_to_name!(all.procedures, id)
        }
        ResourceTarget::Action(id) => id_to_name!(all.actions, id),
        ResourceTarget::ServerTemplate(id) => {
          id_to_name!(all.templates, id)
        }
        ResourceTarget::ResourceSync(id) => id_to_name!(all.syncs, id),
        ResourceTarget::Stack(id) => id_to_name!(all.stacks, id),
        // System targets carry no resource id to resolve.
        ResourceTarget::System(_) => {}
      }
      permissions.push(PermissionToml {
        target: permission.resource_target,
        level: permission.level,
      });
    }
    permissions.sort_by(sort_permissions);

    // Drop any member ids that no longer map to a known user.
    let mut users = user_group
      .users
      .into_iter()
      .filter_map(|user_id| usernames.get(&user_id).cloned())
      .collect::<Vec<_>>();
    users.sort();

    res.push((
      user_group.id,
      UserGroupToml {
        name: user_group.name,
        users,
        all: user_group.all,
        permissions,
      },
    ));
  }
  Ok(())
}

View File

@@ -75,6 +75,7 @@ pub async fn get_updates_for_view(
}
None => {
diffs.push(DiffData::Create {
name: variable.name.clone(),
proposed: format!(
"[[variable]]\n{}",
toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS)

View File

@@ -5,6 +5,7 @@ use komodo_client::entities::{
sync::{DiffData, ResourceDiff},
tag::Tag,
toml::ResourceToml,
ResourceTargetVariant,
};
use mungos::find::find_collect;
use partial_derive2::MaybeNone;
@@ -15,22 +16,45 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
resources: Vec<ResourceToml<Resource::PartialConfig>>,
delete: bool,
all_resources: &AllResourcesById,
all_tags: &HashMap<String, Tag>,
match_resource_type: Option<ResourceTargetVariant>,
match_resources: Option<&[String]>,
id_to_tags: &HashMap<String, Tag>,
match_tags: &[String],
diffs: &mut Vec<ResourceDiff>,
) -> anyhow::Result<()> {
let current_map = find_collect(Resource::coll().await, None, None)
let current_map = find_collect(Resource::coll(), None, None)
.await
.context("failed to get resources from db")?
.into_iter()
.filter(|r| {
Resource::include_resource(
&r.config, &r.tags, all_tags, match_tags,
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.map(|r| (r.name.clone(), r))
.collect::<HashMap<_, _>>();
let resources = resources
.into_iter()
.filter(|r| {
Resource::include_resource_partial(
&r.name,
&r.config,
match_resource_type,
match_resources,
&r.tags,
id_to_tags,
match_tags,
)
})
.collect::<Vec<_>>();
if delete {
for current_resource in current_map.values() {
if !resources.iter().any(|r| r.name == current_resource.name) {
@@ -41,8 +65,10 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
data: DiffData::Delete {
current: super::toml::resource_to_toml::<Resource>(
current_resource.clone(),
false,
vec![],
all_resources,
all_tags,
id_to_tags,
)?,
},
});
@@ -51,15 +77,6 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
}
for mut proposed_resource in resources {
// only resource that might not be included is resource sync
if !Resource::include_resource_partial(
&proposed_resource.config,
&proposed_resource.tags,
all_tags,
match_tags,
) {
continue;
}
match current_map.get(&proposed_resource.name) {
Some(current_resource) => {
// First merge toml resource config (partial) onto default resource config.
@@ -87,7 +104,7 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
let current_tags = current_resource
.tags
.iter()
.filter_map(|id| all_tags.get(id).map(|t| t.name.clone()))
.filter_map(|id| id_to_tags.get(id).map(|t| t.name.clone()))
.collect::<Vec<_>>();
// Only proceed if there are any fields to update,
@@ -105,12 +122,14 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
current_resource.id.clone(),
),
data: DiffData::Update {
proposed,
current: super::toml::resource_to_toml::<Resource>(
current_resource.clone(),
proposed_resource.deploy,
proposed_resource.after,
all_resources,
all_tags,
id_to_tags,
)?,
proposed,
},
});
}
@@ -120,6 +139,7 @@ pub async fn push_updates_for_view<Resource: ResourceSyncTrait>(
target: Resource::resource_target(String::new()),
data: DiffData::Create {
name: proposed_resource.name.clone(),
proposed: super::toml::resource_toml_to_toml_string::<
Resource,
>(proposed_resource)?,

Some files were not shown because too many files have changed in this diff Show More