1.14 - Rename to Komodo - Docker Management (#56)

* setup network page

* add Network, Image, Container

* Docker ListItems and Inspects

* frontend build

* dev0

* network info working

* fix cargo lock

* dev1

* pages for the things

* implement Active in dashboard

* RunBuild update triggers list refresh

* rename deployment executions to StartDeployment etc

* add server level container control

* dev2

* add Config field to Image

* can get image labels from Config.Labels

* mount container page

* server show resource count

* add GetContainerLog api

* add _AllContainers api

* dev3

* move ResourceTarget to entities mod

* GetResourceMatchingContainer api

* connect container to resource

* dev4 add volume names to container list items

* ts types

* unused volume / image / network management

* add image history to image page

* fix PruneContainers incorrect Operation

* update cache for server after server actions

* dev5

* add singapore to Hetzner

* implement delete single network / image / volume api

* dev6

* include "in use" on Docker Lists

* add docker resource delete buttons

* is nice

* fix volume all in use

* remove google font dependency

* use host networking in test compose

* implement Secret Variables (hidden in logs)

* remove unneeded borrow

* interpolate variables / secrets into extra args / onclone / onpull / command etc

* validate empty strings before SelectItem

* rename everything to Komodo

* rename workspace to komodo

* rc1
Maxwell Becker committed (via GitHub) on 2024-09-02 01:38:40 +03:00
parent 2463ed3879 · commit 5fc0a87dea
429 changed files with 14351 additions and 7077 deletions

.vscode/tasks.json (vendored) · 93 lines removed

@@ -1,93 +0,0 @@
{
"version": "2.0.0",
"tasks": [
{
"type": "cargo",
"command": "build",
"group": {
"kind": "build",
"isDefault": true
},
"label": "rust: cargo build"
},
{
"type": "cargo",
"command": "fmt",
"label": "rust: cargo fmt"
},
{
"type": "cargo",
"command": "check",
"label": "rust: cargo check"
},
{
"label": "start dev",
"dependsOn": [
"run core",
"start frontend"
],
"problemMatcher": []
},
{
"type": "shell",
"command": "yarn start",
"label": "start frontend",
"options": {
"cwd": "${workspaceFolder}/frontend"
},
"presentation": {
"group": "start"
}
},
{
"type": "cargo",
"command": "run",
"label": "run core",
"options": {
"cwd": "${workspaceFolder}/bin/core"
},
"presentation": {
"group": "start"
}
},
{
"type": "cargo",
"command": "run",
"label": "run periphery",
"options": {
"cwd": "${workspaceFolder}/bin/periphery"
}
},
{
"type": "cargo",
"command": "run",
"label": "run tests",
"options": {
"cwd": "${workspaceFolder}/bin/tests"
}
},
{
"type": "cargo",
"command": "publish",
"args": ["--allow-dirty"],
"label": "publish types",
"options": {
"cwd": "${workspaceFolder}/lib/types"
}
},
{
"type": "cargo",
"command": "publish",
"label": "publish rs client",
"options": {
"cwd": "${workspaceFolder}/lib/rs_client"
}
},
{
"type": "shell",
"command": "node ./client/ts/generate_types.mjs",
"label": "generate typescript types",
"problemMatcher": []
}
]
}

Cargo.lock (generated) · 1259 lines changed
File diff suppressed because it is too large.


@@ -3,19 +3,20 @@ resolver = "2"
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
[workspace.package]
version = "1.13.4"
version = "1.14.0-rc1"
edition = "2021"
authors = ["mbecker20 <becker.maxh@gmail.com>"]
license = "GPL-3.0-or-later"
repository = "https://github.com/mbecker20/monitor"
homepage = "https://docs.monitor.dev"
repository = "https://github.com/mbecker20/komodo"
homepage = "https://komo.do"
[patch.crates-io]
monitor_client = { path = "client/core/rs" }
# komodo_client = { path = "client/core/rs" }
[workspace.dependencies]
# LOCAL
monitor_client = "1.13.3"
# komodo_client = "1.14.0"
komodo_client = { path = "client/core/rs" }
periphery_client = { path = "client/periphery/rs" }
formatting = { path = "lib/formatting" }
command = { path = "lib/command" }
@@ -35,12 +36,12 @@ derive_variants = "1.0.0"
mongo_indexed = "2.0.1"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
mungos = "1.0.1"
mungos = "1.1.0"
svi = "1.0.1"
# ASYNC
tokio = { version = "1.39.2", features = ["full"] }
reqwest = { version = "0.12.5", features = ["json"] }
tokio = { version = "1.40.0", features = ["full"] }
reqwest = { version = "0.12.7", features = ["json"] }
tokio-util = "0.7.11"
futures = "0.3.30"
futures-util = "0.3.30"
@@ -53,9 +54,9 @@ tokio-tungstenite = "0.23.1"
# SER/DE
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
serde = { version = "1.0.208", features = ["derive"] }
serde = { version = "1.0.209", features = ["derive"] }
strum = { version = "0.26.3", features = ["derive"] }
serde_json = "1.0.125"
serde_json = "1.0.127"
serde_yaml = "0.9.34"
toml = "0.8.19"
@@ -90,16 +91,16 @@ jwt = "0.16.0"
hex = "0.4.3"
# SYSTEM
bollard = "0.17.0"
sysinfo = "0.31.2"
bollard = "0.17.1"
sysinfo = "0.31.4"
# CLOUD
aws-config = "1.5.5"
aws-sdk-ec2 = "1.66.0"
aws-sdk-ecr = "1.40.0"
aws-sdk-ec2 = "1.70.0"
aws-sdk-ecr = "1.42.0"
# MISC
derive_builder = "0.20.0"
derive_builder = "0.20.1"
typeshare = "1.0.3"
octorust = "0.7.0"
colored = "2.1.0"


@@ -11,7 +11,7 @@ repository.workspace = true
[dependencies]
# local
monitor_client.workspace = true
komodo_client.workspace = true
logger.workspace = true
# external
tokio.workspace = true


@@ -1,11 +1,11 @@
FROM rust:1.71.1 as builder
FROM rust:1.80.1 as builder
WORKDIR /builder
COPY . .
RUN cargo build -p alert_logger --release
FROM gcr.io/distroless/cc
FROM gcr.io/distroless/debian-cc
COPY --from=builder /builder/target/release/alert_logger /


@@ -1,4 +1,4 @@
# Alerter
This crate sets up a basic axum server that listens for incoming alert POSTs.
It can be used as a monitor alerting endpoint, and serves as a template for other custom alerter implementations.
It can be used as a Komodo alerting endpoint, and serves as a template for other custom alerter implementations.
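
A minimal sketch of such a custom alerter, assuming an axum 0.7-style setup and that `Alert` derives `Deserialize`/`Debug` (the route and port here are arbitrary):

```rust
use axum::{routing::post, Json, Router};
use komodo_client::entities::alert::Alert;

// Log any alert POSTed by Komodo Core.
async fn handle_alert(Json(alert): Json<Alert>) {
  // `level` and `data` are fields of Alert per the entities module.
  println!("[{:?}] {:?}", alert.level, alert.data);
}

#[tokio::main]
async fn main() {
  let app = Router::new().route("/", post(handle_alert));
  let listener = tokio::net::TcpListener::bind("0.0.0.0:7000")
    .await
    .expect("failed to bind listener");
  axum::serve(listener, app).await.expect("server crashed");
}
```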


@@ -5,9 +5,7 @@ use std::{net::SocketAddr, str::FromStr};
use anyhow::Context;
use axum::{routing::post, Json, Router};
use monitor_client::entities::{
alert::Alert, server::stats::SeverityLevel,
};
use komodo_client::entities::alert::{Alert, SeverityLevel};
use serde::Deserialize;
#[derive(Deserialize)]


@@ -1,6 +1,6 @@
[package]
name = "monitor_cli"
description = "Command line tool to sync monitor resources and execute file defined procedures"
name = "komodo_cli"
description = "Command line tool to execute Komodo actions"
version.workspace = true
edition.workspace = true
authors.workspace = true
@@ -9,14 +9,14 @@ homepage.workspace = true
repository.workspace = true
[[bin]]
name = "monitor"
name = "komodo"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# local
monitor_client.workspace = true
komodo_client.workspace = true
# external
tracing-subscriber.workspace = true
merge_config_files.workspace = true


@@ -1,11 +1,11 @@
# Monitor CLI
# Komodo CLI
Monitor CLI is a tool to sync monitor resources and execute operations.
Komodo CLI is a tool to execute actions on your Komodo instance from shell scripts.
## Install
```sh
cargo install monitor_cli
cargo install komodo_cli
```
Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-dev`.
@@ -14,9 +14,9 @@ Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-de
### Credentials
Configure a file `~/.config/monitor/creds.toml` file with contents:
Configure a file `~/.config/komodo/creds.toml` file with contents:
```toml
url = "https://your.monitor.address"
url = "https://your.komodo.address"
key = "YOUR-API-KEY"
secret = "YOUR-API-SECRET"
```
@@ -25,21 +25,21 @@ Note. You can specify a different creds file by using `--creds ./other/path.toml
You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`:
```sh
monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
komodo --url "https://your.komodo.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
```
### Run Executions
```sh
# Triggers an example build
monitor execute run-build test_build
komodo execute run-build test_build
```
#### Manual
```md
Runs an execution
Usage: monitor execute <COMMAND>
Usage: komodo execute <COMMAND>
Commands:
none The "null" execution. Does nothing


@@ -1,5 +1,5 @@
use clap::{Parser, Subcommand};
use monitor_client::api::execute::Execution;
use komodo_client::api::execute::Execution;
use serde::Deserialize;
#[derive(Parser, Debug)]
@@ -34,7 +34,7 @@ pub struct CliArgs {
fn default_creds() -> String {
let home =
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
format!("{home}/.config/monitor/creds.toml")
format!("{home}/.config/komodo/creds.toml")
}
#[derive(Debug, Clone, Subcommand)]


@@ -1,11 +1,11 @@
use std::time::Duration;
use colored::Colorize;
use monitor_client::api::execute::Execution;
use komodo_client::api::execute::Execution;
use crate::{
helpers::wait_for_enter,
state::{cli_args, monitor_client},
state::{cli_args, komodo_client},
};
pub async fn run(execution: Execution) -> anyhow::Result<()> {
@@ -33,6 +33,36 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::Deploy(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DestroyDeployment(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelRepoBuild(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
@@ -48,31 +78,46 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
Execution::StopContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DestroyContainer(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StartAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RestartAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PauseAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::UnpauseAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::StopAllContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RemoveContainer(data) => {
Execution::PruneContainers(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CloneRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PullRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::BuildRepo(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::CancelRepoBuild(data) => {
Execution::DeleteNetwork(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneNetworks(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::DeleteImage(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneImages(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneContainers(data) => {
Execution::DeleteVolume(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneVolumes(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::PruneSystem(data) => {
println!("{}: {data:?}", "Data".dimmed())
}
Execution::RunSync(data) => {
@@ -112,82 +157,127 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
let res = match execution {
Execution::RunProcedure(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::RunBuild(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::CancelBuild(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::Deploy(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::StartContainer(request) => {
monitor_client().execute(request).await
Execution::StartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::RestartContainer(request) => {
monitor_client().execute(request).await
Execution::RestartDeployment(request) => {
komodo_client().execute(request).await
}
Execution::PauseContainer(request) => {
monitor_client().execute(request).await
Execution::PauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseContainer(request) => {
monitor_client().execute(request).await
Execution::UnpauseDeployment(request) => {
komodo_client().execute(request).await
}
Execution::StopContainer(request) => {
monitor_client().execute(request).await
Execution::StopDeployment(request) => {
komodo_client().execute(request).await
}
Execution::StopAllContainers(request) => {
monitor_client().execute(request).await
}
Execution::RemoveContainer(request) => {
monitor_client().execute(request).await
Execution::DestroyDeployment(request) => {
komodo_client().execute(request).await
}
Execution::CloneRepo(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::PullRepo(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::BuildRepo(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::CancelRepoBuild(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::PruneNetworks(request) => {
monitor_client().execute(request).await
Execution::StartContainer(request) => {
komodo_client().execute(request).await
}
Execution::PruneImages(request) => {
monitor_client().execute(request).await
Execution::RestartContainer(request) => {
komodo_client().execute(request).await
}
Execution::PauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseContainer(request) => {
komodo_client().execute(request).await
}
Execution::StopContainer(request) => {
komodo_client().execute(request).await
}
Execution::DestroyContainer(request) => {
komodo_client().execute(request).await
}
Execution::StartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::RestartAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::UnpauseAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::StopAllContainers(request) => {
komodo_client().execute(request).await
}
Execution::PruneContainers(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::DeleteNetwork(request) => {
komodo_client().execute(request).await
}
Execution::PruneNetworks(request) => {
komodo_client().execute(request).await
}
Execution::DeleteImage(request) => {
komodo_client().execute(request).await
}
Execution::PruneImages(request) => {
komodo_client().execute(request).await
}
Execution::DeleteVolume(request) => {
komodo_client().execute(request).await
}
Execution::PruneVolumes(request) => {
komodo_client().execute(request).await
}
Execution::PruneSystem(request) => {
komodo_client().execute(request).await
}
Execution::RunSync(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::DeployStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::StartStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::RestartStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::PauseStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::UnpauseStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::StopStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::DestroyStack(request) => {
monitor_client().execute(request).await
komodo_client().execute(request).await
}
Execution::Sleep(request) => {
let duration =


@@ -2,7 +2,7 @@
extern crate tracing;
use colored::Colorize;
use monitor_client::api::read::GetVersion;
use komodo_client::api::read::GetVersion;
mod args;
mod exec;
@@ -13,9 +13,14 @@ mod state;
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt().with_target(false).init();
info!(
"Komodo CLI version: {}",
env!("CARGO_PKG_VERSION").blue().bold()
);
let version =
state::monitor_client().read(GetVersion {}).await?.version;
info!("monitor version: {}", version.to_string().blue().bold());
state::komodo_client().read(GetVersion {}).await?.version;
info!("Komodo Core version: {}", version.blue().bold());
match &state::cli_args().command {
args::Command::Execute { execution } => {


@@ -1,17 +1,17 @@
use std::sync::OnceLock;
use clap::Parser;
use komodo_client::KomodoClient;
use merge_config_files::parse_config_file;
use monitor_client::MonitorClient;
pub fn cli_args() -> &'static crate::args::CliArgs {
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
}
pub fn monitor_client() -> &'static MonitorClient {
static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
MONITOR_CLIENT.get_or_init(|| {
pub fn komodo_client() -> &'static KomodoClient {
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
KOMODO_CLIENT.get_or_init(|| {
let args = cli_args();
let crate::args::CredsFile { url, key, secret } =
match (&args.url, &args.key, &args.secret) {
@@ -25,7 +25,7 @@ pub fn monitor_client() -> &'static MonitorClient {
(url, key, secret) => {
let mut creds: crate::args::CredsFile =
parse_config_file(cli_args().creds.as_str())
.expect("failed to parse monitor credentials");
.expect("failed to parse Komodo credentials");
if let Some(url) = url {
creds.url.clone_from(url);
@@ -40,7 +40,7 @@ pub fn monitor_client() -> &'static MonitorClient {
creds
}
};
futures::executor::block_on(MonitorClient::new(url, key, secret))
.expect("failed to initialize monitor client")
futures::executor::block_on(KomodoClient::new(url, key, secret))
.expect("failed to initialize Komodo client")
})
}


@@ -1,5 +1,5 @@
[package]
name = "monitor_core"
name = "komodo_core"
version.workspace = true
edition.workspace = true
authors.workspace = true
@@ -15,7 +15,7 @@ path = "src/main.rs"
[dependencies]
# local
monitor_client = { workspace = true, features = ["mongo"] }
komodo_client = { workspace = true, features = ["mongo"] }
periphery_client.workspace = true
formatting.workspace = true
logger.workspace = true


@@ -2,7 +2,7 @@
FROM rust:1.80.1-bookworm AS core-builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_core --release
RUN cargo build -p komodo_core --release
# Build Frontend
FROM node:20.12-alpine AS frontend-builder
@@ -10,7 +10,7 @@ WORKDIR /builder
COPY ./frontend ./frontend
COPY ./client/core/ts ./client
RUN cd client && yarn && yarn build && yarn link
RUN cd frontend && yarn link @monitor/client && yarn && yarn build
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
# Final Image
FROM debian:bookworm-slim
@@ -32,8 +32,8 @@ COPY --from=frontend-builder /builder/frontend/dist /frontend
EXPOSE 9000
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
LABEL org.opencontainers.image.description="Monitor Core"
LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo
LABEL org.opencontainers.image.description="Komodo Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
CMD ["./core"]


@@ -3,7 +3,7 @@ use std::{sync::OnceLock, time::Instant};
use anyhow::anyhow;
use axum::{http::HeaderMap, routing::post, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{api::auth::*, entities::user::User};
use komodo_client::{api::auth::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;


@@ -3,19 +3,18 @@ use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use futures::future::join_all;
use monitor_client::{
use komodo_client::{
api::execute::{CancelBuild, Deploy, RunBuild},
entities::{
alert::{Alert, AlertData},
alert::{Alert, AlertData, SeverityLevel},
all_logs_success,
build::{Build, ImageRegistry, StandardRegistryConfig},
builder::{Builder, BuilderConfig},
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
deployment::DeploymentState,
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
server::stats::SeverityLevel,
to_monitor_name,
to_komodo_name,
update::{Log, Update},
user::{auto_redeploy_user, User},
},
@@ -28,7 +27,7 @@ use mungos::{
options::FindOneOptions,
},
};
use periphery_client::api::{self, git::RepoActionResponseV1_13};
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
@@ -40,16 +39,20 @@ use crate::{
builder::{cleanup_builder_instance, get_builder_periphery},
channel::build_cancel_channel,
git_token,
query::{get_deployment_state, get_global_variables},
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_extra_args,
interpolate_variables_secrets_into_system_command,
},
query::{get_deployment_state, get_variables_and_secrets},
registry_token,
update::update_update,
update::{init_execution_update, update_update},
},
resource::{self, refresh_build_state_cache},
state::{action_states, db_client, State},
};
use crate::helpers::update::init_execution_update;
use super::ExecuteRequest;
impl Resolve<RunBuild, (User, Update)> for State {
@@ -65,6 +68,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
PermissionLevel::Execute,
)
.await?;
let vars_and_secrets = get_variables_and_secrets().await?;
if build.config.builder_id.is_empty() {
return Err(anyhow!("Must attach builder to RunBuild"));
@@ -172,6 +176,29 @@ impl Resolve<RunBuild, (User, Update)> for State {
// CLONE REPO
let secret_replacers = if !build.config.skip_secret_interp {
// Interpolate variables / secrets into pre build command
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut build.config.pre_build,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
Default::default()
};
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
@@ -180,6 +207,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
replacers: secret_replacers.into_iter().collect(),
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
@@ -194,7 +222,6 @@ impl Resolve<RunBuild, (User, Update)> for State {
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
let res: RepoActionResponseV1_13 = res.into();
update.logs.extend(res.logs);
update.commit_hash =
res.commit_hash.unwrap_or_default().to_string();
@@ -214,90 +241,36 @@ impl Resolve<RunBuild, (User, Update)> for State {
if all_logs_success(&update.logs) {
let secret_replacers = if !build.config.skip_secret_interp {
let core_config = core_config();
let variables = get_global_variables().await?;
// Interpolate variables / secrets into build args
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
let mut secret_replacers_for_log = HashSet::new();
// Interpolate into build args
for arg in &mut build.config.build_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers
.iter()
.map(|(_, variable)| variable.clone()),
);
secret_replacers.extend(more_replacers);
arg.value = res;
}
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut build.config.build_args,
&mut global_replacers,
&mut secret_replacers,
)?;
// Interpolate into secret args
for arg in &mut build.config.secret_args {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&arg.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate global variables")?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers_for_log.extend(
more_replacers.into_iter().map(|(_, variable)| variable),
);
// Secret args don't need to be in replacers sent to periphery.
// The secret args don't end up in the command like build args do.
arg.value = res;
}
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut build.config.secret_args,
&mut global_replacers,
&mut secret_replacers,
)?;
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.into_iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut build.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
if !secret_replacers_for_log.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers_for_log
.into_iter()
.map(|variable| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
add_interp_update_log(
&mut update,
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
@@ -354,7 +327,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
doc! { "$set": {
"config.version": to_bson(&build.config.version)
.context("failed at converting version to bson")?,
"info.last_built_at": monitor_timestamp(),
"info.last_built_at": komodo_timestamp(),
"info.built_hash": &update.commit_hash,
"info.built_message": commit_message
}},
@@ -398,8 +371,8 @@ impl Resolve<RunBuild, (User, Update)> for State {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
@@ -447,8 +420,8 @@ async fn handle_early_return(
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::BuildFailed {
@@ -657,7 +630,7 @@ async fn validate_account_extract_registry_token_aws_ecr(
.await
.context("failed to get aws ecr token")?;
ecr::maybe_create_repo(
&to_monitor_name(&build.name),
&to_komodo_name(&build.name),
region.to_string(),
access_key_id,
secret_access_key,
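
The new `interpolate_variables_secrets_into_*` helpers replace the deleted inline loops above. A plausible sketch of the environment variant, reconstructed from its call sites and the removed two-pass `svi` code; the `VariablesAndSecrets` and `EnvironmentVar` shapes are assumptions:

```rust
use std::collections::{HashMap, HashSet};
use anyhow::Context;

// Assumed shape of what get_variables_and_secrets() returns.
pub struct VariablesAndSecrets {
  pub variables: HashMap<String, String>,
  pub secrets: HashMap<String, String>,
}

// Assumed shape of a build arg / environment entry.
pub struct EnvironmentVar {
  pub variable: String,
  pub value: String,
}

pub fn interpolate_variables_secrets_into_environment(
  vars_and_secrets: &VariablesAndSecrets,
  environment: &mut [EnvironmentVar],
  global_replacers: &mut HashSet<(String, String)>,
  secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
  for env in environment {
    // First pass: global variables.
    let (res, more) = svi::interpolate_variables(
      &env.value,
      &vars_and_secrets.variables,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .context("failed to interpolate global variables")?;
    global_replacers.extend(more);
    // Second pass: core secrets. These replacers are kept so the final
    // command can be sanitized before it reaches the update log.
    let (res, more) = svi::interpolate_variables(
      &res,
      &vars_and_secrets.secrets,
      svi::Interpolator::DoubleBrackets,
      false,
    )
    .context("failed to interpolate core secrets")?;
    secret_replacers.extend(more);
    env.value = res;
  }
  Ok(())
}
```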


@@ -1,6 +1,8 @@
use std::collections::HashSet;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::execute::*,
entities::{
build::{Build, ImageRegistry},
@@ -10,7 +12,7 @@ use monitor_client::{
},
get_image_name,
permission::PermissionLevel,
server::{Server, ServerState},
server::Server,
update::{Log, Update},
user::User,
Version,
@@ -23,8 +25,15 @@ use crate::{
cloud::aws::ecr,
config::core_config,
helpers::{
interpolate_variables_secrets_into_environment, periphery_client,
query::get_server_with_status, registry_token,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_container_command,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_extra_args,
},
periphery_client,
query::get_variables_and_secrets,
registry_token,
update::update_update,
},
monitor::update_cache_for_server,
@@ -47,13 +56,8 @@ async fn setup_deployment_execution(
return Err(anyhow!("deployment has no server configured"));
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
let server =
resource::get::<Server>(&deployment.config.server_id).await?;
Ok((deployment, server))
}
@@ -88,6 +92,12 @@ impl Resolve<Deploy, (User, Update)> for State {
let periphery = periphery_client(&server)?;
periphery
.health_check()
.await
.context("Failed server health check, stopping run.")?;
// This block resolves the attached Build to an actual versioned image
let (version, registry_token, aws_ecr) = match &deployment
.config
.image
@@ -181,12 +191,42 @@ impl Resolve<Deploy, (User, Update)> for State {
}
};
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers = if !deployment.config.skip_secret_interp {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut deployment.config.environment,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut deployment.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_container_command(
&vars_and_secrets,
&mut deployment.config.command,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
&mut update,
)
.await?
&global_replacers,
&secret_replacers,
);
secret_replacers
} else {
Default::default()
};
@@ -225,11 +265,11 @@ impl Resolve<Deploy, (User, Update)> for State {
}
}
impl Resolve<StartContainer, (User, Update)> for State {
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
impl Resolve<StartDeployment, (User, Update)> for State {
#[instrument(name = "StartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartContainer { deployment }: StartContainer,
StartDeployment { deployment }: StartDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
@@ -253,7 +293,7 @@ impl Resolve<StartContainer, (User, Update)> for State {
let log = match periphery
.request(api::container::StartContainer {
name: deployment.name.clone(),
name: deployment.name,
})
.await
{
@@ -273,11 +313,11 @@ impl Resolve<StartContainer, (User, Update)> for State {
}
}
impl Resolve<RestartContainer, (User, Update)> for State {
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
impl Resolve<RestartDeployment, (User, Update)> for State {
#[instrument(name = "RestartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RestartContainer { deployment }: RestartContainer,
RestartDeployment { deployment }: RestartDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
@@ -301,7 +341,7 @@ impl Resolve<RestartContainer, (User, Update)> for State {
let log = match periphery
.request(api::container::RestartContainer {
name: deployment.name.clone(),
name: deployment.name,
})
.await
{
@@ -323,11 +363,11 @@ impl Resolve<RestartContainer, (User, Update)> for State {
}
}
impl Resolve<PauseContainer, (User, Update)> for State {
#[instrument(name = "PauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
impl Resolve<PauseDeployment, (User, Update)> for State {
#[instrument(name = "PauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PauseContainer { deployment }: PauseContainer,
PauseDeployment { deployment }: PauseDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
@@ -351,7 +391,7 @@ impl Resolve<PauseContainer, (User, Update)> for State {
let log = match periphery
.request(api::container::PauseContainer {
name: deployment.name.clone(),
name: deployment.name,
})
.await
{
@@ -371,11 +411,11 @@ impl Resolve<PauseContainer, (User, Update)> for State {
}
}
impl Resolve<UnpauseContainer, (User, Update)> for State {
#[instrument(name = "UnpauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
impl Resolve<UnpauseDeployment, (User, Update)> for State {
#[instrument(name = "UnpauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
UnpauseContainer { deployment }: UnpauseContainer,
UnpauseDeployment { deployment }: UnpauseDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
@@ -399,7 +439,7 @@ impl Resolve<UnpauseContainer, (User, Update)> for State {
let log = match periphery
.request(api::container::UnpauseContainer {
name: deployment.name.clone(),
name: deployment.name,
})
.await
{
@@ -421,15 +461,15 @@ impl Resolve<UnpauseContainer, (User, Update)> for State {
}
}
impl Resolve<StopContainer, (User, Update)> for State {
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
impl Resolve<StopDeployment, (User, Update)> for State {
#[instrument(name = "StopDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopContainer {
StopDeployment {
deployment,
signal,
time,
}: StopContainer,
}: StopDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
@@ -453,7 +493,7 @@ impl Resolve<StopContainer, (User, Update)> for State {
let log = match periphery
.request(api::container::StopContainer {
name: deployment.name.clone(),
name: deployment.name,
signal: signal
.unwrap_or(deployment.config.termination_signal)
.into(),
@@ -479,15 +519,15 @@ impl Resolve<StopContainer, (User, Update)> for State {
}
}
impl Resolve<RemoveContainer, (User, Update)> for State {
#[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
impl Resolve<DestroyDeployment, (User, Update)> for State {
#[instrument(name = "DestroyDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RemoveContainer {
DestroyDeployment {
deployment,
signal,
time,
}: RemoveContainer,
}: DestroyDeployment,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (deployment, server) =
@@ -502,7 +542,7 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
// Will check to ensure deployment not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.removing = true)?;
action_state.update(|state| state.destroying = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
@@ -511,7 +551,7 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
let log = match periphery
.request(api::container::RemoveContainer {
name: deployment.name.clone(),
name: deployment.name,
signal: signal
.unwrap_or(deployment.config.termination_signal)
.into(),


@@ -3,7 +3,7 @@ use std::time::Instant;
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::execute::*,
entities::{
update::{Log, Update},
@@ -39,19 +39,34 @@ mod sync;
#[serde(tag = "type", content = "params")]
pub enum ExecuteRequest {
// ==== SERVER ====
StopAllContainers(StopAllContainers),
PruneContainers(PruneContainers),
PruneImages(PruneImages),
PruneNetworks(PruneNetworks),
// ==== DEPLOYMENT ====
Deploy(Deploy),
StartContainer(StartContainer),
RestartContainer(RestartContainer),
PauseContainer(PauseContainer),
UnpauseContainer(UnpauseContainer),
StopContainer(StopContainer),
RemoveContainer(RemoveContainer),
DestroyContainer(DestroyContainer),
StartAllContainers(StartAllContainers),
RestartAllContainers(RestartAllContainers),
PauseAllContainers(PauseAllContainers),
UnpauseAllContainers(UnpauseAllContainers),
StopAllContainers(StopAllContainers),
PruneContainers(PruneContainers),
DeleteNetwork(DeleteNetwork),
PruneNetworks(PruneNetworks),
DeleteImage(DeleteImage),
PruneImages(PruneImages),
DeleteVolume(DeleteVolume),
PruneVolumes(PruneVolumes),
PruneSystem(PruneSystem),
// ==== DEPLOYMENT ====
Deploy(Deploy),
StartDeployment(StartDeployment),
RestartDeployment(RestartDeployment),
PauseDeployment(PauseDeployment),
UnpauseDeployment(UnpauseDeployment),
StopDeployment(StopDeployment),
DestroyDeployment(DestroyDeployment),
// ==== STACK ====
DeployStack(DeployStack),
@@ -144,10 +159,7 @@ async fn task(
user: User,
update: Update,
) -> anyhow::Result<String> {
info!(
"/execute request {req_id} | user: {} ({})",
user.username, user.id
);
info!("/execute request {req_id} | user: {}", user.username);
let timer = Instant::now();
let res = State
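
Since `ExecuteRequest` is adjacently tagged (`tag = "type"`, `content = "params"`), each /execute call is a JSON envelope naming the execution. A small illustration with a simplified stand-in for the real enum; the `deployment: String` field is inferred from the handler destructuring above:

```rust
use serde::Serialize;

// Simplified stand-in for the real ExecuteRequest enum.
#[derive(Serialize)]
#[serde(tag = "type", content = "params")]
enum ExecuteRequest {
  StartDeployment(StartDeployment),
}

#[derive(Serialize)]
struct StartDeployment {
  deployment: String,
}

fn main() {
  let req = ExecuteRequest::StartDeployment(StartDeployment {
    deployment: "my-deployment".into(),
  });
  // Prints: {"type":"StartDeployment","params":{"deployment":"my-deployment"}}
  println!("{}", serde_json::to_string(&req).unwrap());
}
```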


@@ -1,7 +1,7 @@
use std::pin::Pin;
use formatting::{bold, colored, format_serror, muted, Color};
use monitor_client::{
use komodo_client::{
api::execute::RunProcedure,
entities::{
permission::PermissionLevel, procedure::Procedure,


@@ -1,16 +1,16 @@
use std::{future::IntoFuture, time::Duration};
use std::{collections::HashSet, future::IntoFuture, time::Duration};
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::execute::*,
entities::{
alert::{Alert, AlertData},
alert::{Alert, AlertData, SeverityLevel},
builder::{Builder, BuilderConfig},
monitor_timestamp, optional_string,
komodo_timestamp, optional_string,
permission::PermissionLevel,
repo::Repo,
server::{stats::SeverityLevel, Server},
server::Server,
update::{Log, Update},
user::User,
},
@@ -22,7 +22,7 @@ use mungos::{
options::FindOneOptions,
},
};
use periphery_client::api::{self, git::RepoActionResponseV1_13};
use periphery_client::api;
use resolver_api::Resolve;
use tokio_util::sync::CancellationToken;
@@ -31,7 +31,14 @@ use crate::{
alert::send_alerts,
builder::{cleanup_builder_instance, get_builder_periphery},
channel::repo_cancel_channel,
git_token, periphery_client,
git_token,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_system_command,
},
periphery_client,
query::get_variables_and_secrets,
update::update_update,
},
resource::{self, refresh_repo_state_cache},
@@ -84,6 +91,11 @@ impl Resolve<CloneRepo, (User, Update)> for State {
let periphery = periphery_client(&server)?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers =
interpolate(&mut repo, &mut update).await?;
let logs = match periphery
.request(api::git::CloneRepo {
args: (&repo).into(),
@@ -91,13 +103,11 @@ impl Resolve<CloneRepo, (User, Update)> for State {
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(res) => {
let res: RepoActionResponseV1_13 = res.into();
res.logs
}
Ok(res) => res.logs,
Err(e) => {
vec![Log::error(
"clone repo",
@@ -124,7 +134,7 @@ impl Resolve<PullRepo, (User, Update)> for State {
PullRepo { repo }: PullRepo,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let repo = resource::get_check_permissions::<Repo>(
let mut repo = resource::get_check_permissions::<Repo>(
&repo,
&user,
PermissionLevel::Execute,
@@ -151,6 +161,11 @@ impl Resolve<PullRepo, (User, Update)> for State {
let periphery = periphery_client(&server)?;
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers =
interpolate(&mut repo, &mut update).await?;
let logs = match periphery
.request(api::git::PullRepo {
name: repo.name.clone(),
@@ -160,11 +175,11 @@ impl Resolve<PullRepo, (User, Update)> for State {
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect(),
})
.await
{
Ok(res) => {
let res: RepoActionResponseV1_13 = res.into();
update.commit_hash = res.commit_hash.unwrap_or_default();
res.logs
}
@@ -217,7 +232,7 @@ async fn update_last_pulled_time(repo_name: &str) {
.repos
.update_one(
doc! { "name": repo_name },
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
doc! { "$set": { "info.last_pulled_at": komodo_timestamp() } },
)
.await;
if let Err(e) = res {
@@ -337,14 +352,20 @@ impl Resolve<BuildRepo, (User, Update)> for State {
// CLONE REPO
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers =
interpolate(&mut repo, &mut update).await?;
let res = tokio::select! {
res = periphery
.request(api::git::CloneRepo {
args: (&repo).into(),
git_token,
environment: Default::default(),
env_file_path: Default::default(),
skip_secret_interp: Default::default(),
environment: repo.config.environment,
env_file_path: repo.config.env_file_path,
skip_secret_interp: repo.config.skip_secret_interp,
replacers: secret_replacers.into_iter().collect()
}) => res,
_ = cancel.cancelled() => {
debug!("build cancelled during clone, cleaning up builder");
@@ -359,7 +380,6 @@ impl Resolve<BuildRepo, (User, Update)> for State {
let commit_message = match res {
Ok(res) => {
debug!("finished repo clone");
let res: RepoActionResponseV1_13 = res.into();
update.logs.extend(res.logs);
update.commit_hash = res.commit_hash.unwrap_or_default();
res.commit_message.unwrap_or_default()
@@ -383,7 +403,7 @@ impl Resolve<BuildRepo, (User, Update)> for State {
.update_one(
doc! { "name": &repo.name },
doc! { "$set": {
"info.last_built_at": monitor_timestamp(),
"info.last_built_at": komodo_timestamp(),
"info.built_hash": &update.commit_hash,
"info.built_message": commit_message
}},
@@ -421,8 +441,8 @@ impl Resolve<BuildRepo, (User, Update)> for State {
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::RepoBuildFailed {
@@ -468,8 +488,8 @@ async fn handle_builder_early_return(
let alert = Alert {
id: Default::default(),
target,
ts: monitor_timestamp(),
resolved_ts: Some(monitor_timestamp()),
ts: komodo_timestamp(),
resolved_ts: Some(komodo_timestamp()),
resolved: true,
level: SeverityLevel::Warning,
data: AlertData::RepoBuildFailed {
@@ -589,3 +609,46 @@ impl Resolve<CancelRepoBuild, (User, Update)> for State {
Ok(update)
}
}
async fn interpolate(
repo: &mut Repo,
update: &mut Update,
) -> anyhow::Result<HashSet<(String, String)>> {
if !repo.config.skip_secret_interp {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut repo.config.environment,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut repo.config.on_clone,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_system_command(
&vars_and_secrets,
&mut repo.config.on_pull,
&mut global_replacers,
&mut secret_replacers,
)?;
add_interp_update_log(
update,
&global_replacers,
&secret_replacers,
);
Ok(secret_replacers)
} else {
Ok(Default::default())
}
}


@@ -1,12 +1,12 @@
use anyhow::{anyhow, Context};
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::execute::*,
entities::{
all_logs_success, monitor_timestamp,
all_logs_success,
permission::PermissionLevel,
server::{Server, ServerState},
update::{Log, Update, UpdateStatus},
server::Server,
update::{Log, Update},
user::User,
},
};
@@ -14,14 +14,540 @@ use periphery_client::api;
use resolver_api::Resolve;
use crate::{
helpers::{
periphery_client, query::get_server_with_status,
update::update_update,
},
helpers::{periphery_client, update::update_update},
monitor::update_cache_for_server,
resource,
state::{action_states, State},
};
impl Resolve<StartContainer, (User, Update)> for State {
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartContainer { server, container }: StartContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.starting_containers = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::StartContainer { name: container })
.await
{
Ok(log) => log,
Err(e) => Log::error(
"start container",
format_serror(&e.context("failed to start container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<RestartContainer, (User, Update)> for State {
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RestartContainer { server, container }: RestartContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.restarting_containers = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::RestartContainer { name: container })
.await
{
Ok(log) => log,
Err(e) => Log::error(
"restart container",
format_serror(
&e.context("failed to restart container").into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PauseContainer, (User, Update)> for State {
#[instrument(name = "PauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PauseContainer { server, container }: PauseContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pausing_containers = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::PauseContainer { name: container })
.await
{
Ok(log) => log,
Err(e) => Log::error(
"pause container",
format_serror(&e.context("failed to pause container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<UnpauseContainer, (User, Update)> for State {
#[instrument(name = "UnpauseContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
UnpauseContainer { server, container }: UnpauseContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.unpausing_containers = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::UnpauseContainer { name: container })
.await
{
Ok(log) => log,
Err(e) => Log::error(
"unpause container",
format_serror(
&e.context("failed to unpause container").into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopContainer, (User, Update)> for State {
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StopContainer {
server,
container,
signal,
time,
}: StopContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.stopping_containers = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::StopContainer {
name: container,
signal,
time,
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<DestroyContainer, (User, Update)> for State {
#[instrument(name = "DestroyContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DestroyContainer {
server,
container,
signal,
time,
}: DestroyContainer,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pruning_containers = true)?;
// Send update after setting action state, this way frontend gets correct state.
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::container::RemoveContainer {
name: container,
signal,
time,
})
.await
{
Ok(log) => log,
Err(e) => Log::error(
"stop container",
format_serror(&e.context("failed to stop container").into()),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StartAllContainers, (User, Update)> for State {
#[instrument(name = "StartAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
StartAllContainers { server }: StartAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.starting_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::StartAllContainers {})
.await
.context("failed to start all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"start all containers",
String::from("All containers have been started on the host."),
);
}
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<RestartAllContainers, (User, Update)> for State {
#[instrument(name = "RestartAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
RestartAllContainers { server }: RestartAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard = action_state
.update(|state| state.restarting_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::RestartAllContainers {})
.await
.context("failed to restart all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"restart all containers",
String::from(
"All containers have been restarted on the host.",
),
);
}
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PauseAllContainers, (User, Update)> for State {
#[instrument(name = "PauseAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PauseAllContainers { server }: PauseAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pausing_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
.request(api::container::PauseAllContainers {})
.await
.context("failed to pause all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"pause all containers",
String::from("All containers have been paused on the host."),
);
}
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<UnpauseAllContainers, (User, Update)> for State {
#[instrument(name = "UnpauseAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
UnpauseAllContainers { server }: UnpauseAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
    let _action_guard = action_state
      .update(|state| state.unpausing_containers = true)?;
update_update(update.clone()).await?;
let logs = periphery_client(&server)?
      .request(api::container::UnpauseAllContainers {})
.await
.context("failed to unpause all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log(
"unpause all containers",
String::from(
"All containers have been unpaused on the host.",
),
);
}
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<StopAllContainers, (User, Update)> for State {
#[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
@@ -29,12 +555,12 @@ impl Resolve<StopAllContainers, (User, Update)> for State {
StopAllContainers { server }: StopAllContainers,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let (server, status) = get_server_with_status(&server).await?;
if status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"
));
}
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
@@ -52,14 +578,18 @@ impl Resolve<StopAllContainers, (User, Update)> for State {
let logs = periphery_client(&server)?
.request(api::container::StopAllContainers {})
.await
.context("failed to stop all container on host")?;
.context("failed to stop all containers on host")?;
update.logs.extend(logs);
if all_logs_success(&update.logs) {
update.push_simple_log("stop all containers", String::from("All containers have successfully been stopped on the host."));
update.push_simple_log(
"stop all containers",
String::from("All containers have been stopped on the host."),
);
}
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
@@ -112,11 +642,55 @@ impl Resolve<PruneContainers, (User, Update)> for State {
),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
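
The hunks above swap the manual bookkeeping (`update.success`, `UpdateStatus::Complete`, `end_ts`) for a single `update.finalize()` call. A plausible sketch of that helper, inferred only from the lines it replaces and not verified against the komodo_client source:

// Stand-in types so the sketch is self-contained.
struct Log { success: bool }
enum UpdateStatus { InProgress, Complete }
struct Update {
  success: bool,
  status: UpdateStatus,
  end_ts: Option<i64>,
  logs: Vec<Log>,
}

fn all_logs_success(logs: &[Log]) -> bool {
  logs.iter().all(|log| log.success)
}

fn komodo_timestamp() -> i64 {
  // stand-in for the real helper: unix epoch millis
  std::time::SystemTime::now()
    .duration_since(std::time::UNIX_EPOCH)
    .unwrap()
    .as_millis() as i64
}

impl Update {
  pub fn finalize(&mut self) {
    // success derived from the logs, status closed out, end timestamped
    self.success = all_logs_success(&self.logs);
    self.status = UpdateStatus::Complete;
    self.end_ts = Some(komodo_timestamp());
  }
}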
impl Resolve<DeleteNetwork, (User, Update)> for State {
#[instrument(name = "DeleteNetwork", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DeleteNetwork { server, name }: DeleteNetwork,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::network::DeleteNetwork { name: name.clone() })
.await
.context(format!(
"failed to delete network {name} on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"delete network",
format_serror(
&e.context(format!("failed to delete network {name}"))
.into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
@@ -166,11 +740,54 @@ impl Resolve<PruneNetworks, (User, Update)> for State {
),
};
update.success = log.success;
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<DeleteImage, (User, Update)> for State {
#[instrument(name = "DeleteImage", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DeleteImage { server, name }: DeleteImage,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::image::DeleteImage { name: name.clone() })
.await
.context(format!(
"failed to delete image {name} on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"delete image",
format_serror(
&e.context(format!("failed to delete image {name}")).into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
@@ -207,7 +824,7 @@ impl Resolve<PruneImages, (User, Update)> for State {
let periphery = periphery_client(&server)?;
let log =
match periphery.request(api::build::PruneImages {}).await {
match periphery.request(api::image::PruneImages {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune images",
@@ -219,9 +836,155 @@ impl Resolve<PruneImages, (User, Update)> for State {
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<DeleteVolume, (User, Update)> for State {
#[instrument(name = "DeleteVolume", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
DeleteVolume { server, name }: DeleteVolume,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery
.request(api::volume::DeleteVolume { name: name.clone() })
.await
.context(format!(
"failed to delete volume {name} on server {}",
server.name
)) {
Ok(log) => log,
Err(e) => Log::error(
"delete volume",
format_serror(
&e.context(format!("failed to delete volume {name}"))
.into(),
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PruneVolumes, (User, Update)> for State {
#[instrument(name = "PruneVolumes", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneVolumes { server }: PruneVolumes,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pruning_volumes = true)?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log =
match periphery.request(api::volume::PruneVolumes {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune volumes",
format!(
"failed to prune volumes on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}
impl Resolve<PruneSystem, (User, Update)> for State {
#[instrument(name = "PruneSystem", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
async fn resolve(
&self,
PruneSystem { server }: PruneSystem,
(user, mut update): (User, Update),
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Execute,
)
.await?;
// get the action state for the server (or insert default).
let action_state = action_states()
.server
.get_or_insert_default(&server.id)
.await;
// Will check to ensure server not already busy before updating, and return Err if so.
// The returned guard will set the action state back to default when dropped.
let _action_guard =
action_state.update(|state| state.pruning_system = true)?;
update_update(update.clone()).await?;
let periphery = periphery_client(&server)?;
let log = match periphery.request(api::PruneSystem {}).await {
Ok(log) => log,
Err(e) => Log::error(
"prune system",
format!(
"failed to docket system prune on server {} | {e:#?}",
server.name
),
),
};
update.logs.push(log);
update_cache_for_server(&server).await;
update.finalize();
update_update(update.clone()).await?;
Ok(update)

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::{execute::LaunchServer, write::CreateServer},
entities::{
permission::PermissionLevel,

View File

@@ -1,6 +1,8 @@
use std::collections::HashSet;
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::execute::*,
entities::{
permission::PermissionLevel, stack::StackInfo, update::Update,
@@ -13,7 +15,13 @@ use resolver_api::Resolve;
use crate::{
helpers::{
interpolate_variables_secrets_into_environment, periphery_client,
interpolate::{
add_interp_update_log,
interpolate_variables_secrets_into_environment,
interpolate_variables_secrets_into_extra_args,
},
periphery_client,
query::get_variables_and_secrets,
stack::{
execute::execute_compose, get_stack_and_server,
services::extract_services_into_res,
@@ -65,13 +73,38 @@ impl Resolve<DeployStack, (User, Update)> for State {
|| format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
)?;
if !stack.config.skip_secret_interp {
// interpolate variables / secrets, returning the sanitizing replacers to send to
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
let secret_replacers = if !stack.config.skip_secret_interp {
let vars_and_secrets = get_variables_and_secrets().await?;
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
interpolate_variables_secrets_into_environment(
&vars_and_secrets,
&mut stack.config.environment,
&mut global_replacers,
&mut secret_replacers,
)?;
interpolate_variables_secrets_into_extra_args(
&vars_and_secrets,
&mut stack.config.extra_args,
&mut global_replacers,
&mut secret_replacers,
)?;
      )
      .await?;
    }
      add_interp_update_log(
        &mut update,
        &global_replacers,
        &secret_replacers,
      );
      secret_replacers
    } else {
      Default::default()
    };
let ComposeUpResponse {
logs,
@@ -87,6 +120,7 @@ impl Resolve<DeployStack, (User, Update)> for State {
service: None,
git_token,
registry_token,
replacers: secret_replacers.into_iter().collect(),
})
.await?;
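
The change above threads a set of `secret_replacers` from interpolation through to periphery, so secret values can be scrubbed from the final logs. A minimal sketch of both halves, assuming a `[[NAME]]` token syntax and illustrative signatures (not the actual helpers):

use std::collections::{HashMap, HashSet};

fn interpolate(
  input: &mut String,
  vars: &HashMap<String, (String, bool)>, // name -> (value, is_secret)
  secret_replacers: &mut HashSet<(String, String)>,
) {
  for (name, (value, is_secret)) in vars.iter() {
    let token = format!("[[{name}]]");
    if input.contains(&token) {
      *input = input.replace(&token, value);
      if *is_secret {
        // periphery later swaps value -> placeholder before logging
        secret_replacers.insert((value.clone(), format!("<{name}>")));
      }
    }
  }
}

fn sanitize(log: &str, replacers: &HashSet<(String, String)>) -> String {
  replacers.iter().fold(log.to_string(), |log, (value, token)| {
    log.replace(value, token)
  })
}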

View File

@@ -3,7 +3,7 @@ use std::collections::HashMap;
use anyhow::{anyhow, Context};
use formatting::{colored, format_serror, Color};
use mongo_indexed::doc;
use monitor_client::{
use komodo_client::{
api::{execute::RunSync, write::RefreshResourceSyncPending},
entities::{
self,
@@ -11,7 +11,7 @@ use monitor_client::{
build::Build,
builder::Builder,
deployment::Deployment,
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
@@ -387,7 +387,7 @@ impl Resolve<RunSync, (User, Update)> for State {
&sync.id,
doc! {
"$set": {
"info.last_sync_ts": monitor_timestamp(),
"info.last_sync_ts": komodo_timestamp(),
"info.last_sync_hash": hash,
"info.last_sync_message": message,
}

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::read::{
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
},

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
alerter::{Alerter, AlerterListItem},

View File

@@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use futures::TryStreamExt;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
build::{Build, BuildActionState, BuildListItem, BuildState},

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
builder::{Builder, BuilderListItem},

View File

@@ -1,13 +1,14 @@
use std::{cmp, collections::HashSet};
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
deployment::{
Deployment, DeploymentActionState, DeploymentConfig,
DeploymentListItem, DeploymentState, DockerContainerStats,
DeploymentListItem, DeploymentState,
},
docker::container::ContainerStats,
permission::PermissionLevel,
server::Server,
update::Log,
@@ -84,10 +85,10 @@ impl Resolve<GetDeploymentContainer, User> for State {
const MAX_LOG_LENGTH: u64 = 5000;
impl Resolve<GetLog, User> for State {
impl Resolve<GetDeploymentLog, User> for State {
async fn resolve(
&self,
GetLog { deployment, tail }: GetLog,
GetDeploymentLog { deployment, tail }: GetDeploymentLog,
user: User,
) -> anyhow::Result<Log> {
let Deployment {
@@ -114,15 +115,15 @@ impl Resolve<GetLog, User> for State {
}
}
impl Resolve<SearchLog, User> for State {
impl Resolve<SearchDeploymentLog, User> for State {
async fn resolve(
&self,
SearchLog {
SearchDeploymentLog {
deployment,
terms,
combinator,
invert,
}: SearchLog,
}: SearchDeploymentLog,
user: User,
) -> anyhow::Result<Log> {
let Deployment {
@@ -156,7 +157,7 @@ impl Resolve<GetDeploymentStats, User> for State {
&self,
GetDeploymentStats { deployment }: GetDeploymentStats,
user: User,
) -> anyhow::Result<DockerContainerStats> {
) -> anyhow::Result<ContainerStats> {
let Deployment {
name,
config: DeploymentConfig { server_id, .. },

View File

@@ -3,7 +3,7 @@ use std::{collections::HashSet, sync::OnceLock, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
build::Build,
@@ -12,8 +12,8 @@ use monitor_client::{
repo::Repo,
server::Server,
sync::ResourceSync,
update::ResourceTarget,
user::User,
ResourceTarget,
},
};
use resolver_api::{
@@ -105,6 +105,14 @@ enum ReadRequest {
GetHistoricalServerStats(GetHistoricalServerStats),
ListServers(ListServers),
ListFullServers(ListFullServers),
InspectDockerContainer(InspectDockerContainer),
GetResourceMatchingContainer(GetResourceMatchingContainer),
GetContainerLog(GetContainerLog),
SearchContainerLog(SearchContainerLog),
InspectDockerNetwork(InspectDockerNetwork),
InspectDockerImage(InspectDockerImage),
ListDockerImageHistory(ListDockerImageHistory),
InspectDockerVolume(InspectDockerVolume),
#[to_string_resolver]
ListDockerContainers(ListDockerContainers),
#[to_string_resolver]
@@ -112,6 +120,8 @@ enum ReadRequest {
#[to_string_resolver]
ListDockerImages(ListDockerImages),
#[to_string_resolver]
ListDockerVolumes(ListDockerVolumes),
#[to_string_resolver]
ListComposeProjects(ListComposeProjects),
// ==== DEPLOYMENT ====
@@ -120,8 +130,8 @@ enum ReadRequest {
GetDeploymentContainer(GetDeploymentContainer),
GetDeploymentActionState(GetDeploymentActionState),
GetDeploymentStats(GetDeploymentStats),
GetLog(GetLog),
SearchLog(SearchLog),
GetDeploymentLog(GetDeploymentLog),
SearchDeploymentLog(SearchDeploymentLog),
ListDeployments(ListDeployments),
ListFullDeployments(ListFullDeployments),
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),

View File

@@ -1,5 +1,5 @@
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::read::{
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
ListPermissionsResponse, ListUserTargetPermissions,

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
permission::PermissionLevel,

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use mongo_indexed::{doc, Document};
use monitor_client::{
use komodo_client::{
api::read::{
GetDockerRegistryAccount, GetDockerRegistryAccountResponse,
GetGitProviderAccount, GetGitProviderAccountResponse,

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,

View File

@@ -1,9 +1,8 @@
use monitor_client::{
use komodo_client::{
api::read::{FindResources, FindResourcesResponse},
entities::{
build::Build, deployment::Deployment, procedure::Procedure,
repo::Repo, server::Server, update::ResourceTargetVariant,
user::User,
repo::Repo, server::Server, user::User, ResourceTargetVariant,
},
};
use resolver_api::Resolve;

View File

@@ -1,4 +1,5 @@
use std::{
cmp,
collections::HashMap,
sync::{Arc, OnceLock},
};
@@ -7,26 +8,42 @@ use anyhow::{anyhow, Context};
use async_timing_util::{
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
};
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
deployment::Deployment,
docker::{
container::Container,
image::{Image, ImageHistoryResponseItem},
network::Network,
volume::Volume,
},
permission::PermissionLevel,
server::{
Server, ServerActionState, ServerListItem, ServerState,
},
stack::{Stack, StackServiceNames},
update::Log,
user::User,
ResourceTarget,
},
};
use mungos::{
find::find_collect,
mongodb::{bson::doc, options::FindOptions},
};
use periphery_client::api as periphery;
use periphery_client::api::{
self as periphery,
container::InspectContainer,
image::{ImageHistory, InspectImage},
network::InspectNetwork,
volume::InspectVolume,
};
use resolver_api::{Resolve, ResolveToString};
use tokio::sync::Mutex;
use crate::{
helpers::periphery_client,
helpers::{periphery_client, stack::compose_container_match_regex},
resource,
state::{action_states, db_client, server_status_cache, State},
};
@@ -326,10 +343,10 @@ impl Resolve<GetHistoricalServerStats, User> for State {
}
}
impl ResolveToString<ListDockerImages, User> for State {
impl ResolveToString<ListDockerContainers, User> for State {
async fn resolve_to_string(
&self,
ListDockerImages { server }: ListDockerImages,
ListDockerContainers { server }: ListDockerContainers,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
@@ -341,8 +358,8 @@ impl ResolveToString<ListDockerImages, User> for State {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(images) = &cache.images {
serde_json::to_string(images)
if let Some(containers) = &cache.containers {
serde_json::to_string(containers)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
@@ -350,6 +367,152 @@ impl ResolveToString<ListDockerImages, User> for State {
}
}
impl Resolve<InspectDockerContainer, User> for State {
async fn resolve(
&self,
InspectDockerContainer { server, container }: InspectDockerContainer,
user: User,
) -> anyhow::Result<Container> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect container: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectContainer { name: container })
.await
}
}
const MAX_LOG_LENGTH: u64 = 5000;
impl Resolve<GetContainerLog, User> for State {
async fn resolve(
&self,
GetContainerLog {
server,
container,
tail,
}: GetContainerLog,
user: User,
) -> anyhow::Result<Log> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(periphery::container::GetContainerLog {
name: container,
tail: cmp::min(tail, MAX_LOG_LENGTH),
})
.await
.context("failed at call to periphery")
}
}
impl Resolve<SearchContainerLog, User> for State {
async fn resolve(
&self,
SearchContainerLog {
server,
container,
terms,
combinator,
invert,
}: SearchContainerLog,
user: User,
) -> anyhow::Result<Log> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
periphery_client(&server)?
.request(periphery::container::GetContainerLogSearch {
name: container,
terms,
combinator,
invert,
})
.await
.context("failed at call to periphery")
}
}
impl Resolve<GetResourceMatchingContainer, User> for State {
async fn resolve(
&self,
GetResourceMatchingContainer { server, container }: GetResourceMatchingContainer,
user: User,
) -> anyhow::Result<GetResourceMatchingContainerResponse> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
// first check deployments
if let Ok(deployment) =
resource::get::<Deployment>(&container).await
{
return Ok(GetResourceMatchingContainerResponse {
resource: ResourceTarget::Deployment(deployment.id).into(),
});
}
// then check stacks
let stacks =
resource::list_full_for_user_using_document::<Stack>(
doc! { "config.server_id": &server.id },
&user,
)
.await?;
// check matching stack
for stack in stacks {
for StackServiceNames {
service_name,
container_name,
} in stack
.info
.deployed_services
.unwrap_or(stack.info.latest_services)
{
let is_match = match compose_container_match_regex(&container_name)
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
{
Ok(regex) => regex,
Err(e) => {
warn!("{e:#}");
continue;
}
}.is_match(&container);
if is_match {
return Ok(GetResourceMatchingContainerResponse {
resource: ResourceTarget::Stack(stack.id).into(),
});
}
}
}
Ok(GetResourceMatchingContainerResponse { resource: None })
}
}
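
`compose_container_match_regex` itself is not shown in this diff. A plausible sketch, assuming docker compose's usual `{name}` or `{name}-{replica}` container naming (the real pattern may differ):

use regex::Regex;

fn compose_container_match_regex(
  container_name: &str,
) -> anyhow::Result<Regex> {
  // match the exact name, or the name plus a numeric replica suffix,
  // e.g. both "stack-web" and "stack-web-2" for service name "stack-web"
  let escaped = regex::escape(container_name);
  Regex::new(&format!("^{escaped}(-[0-9]+)?$")).map_err(Into::into)
}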
impl ResolveToString<ListDockerNetworks, User> for State {
async fn resolve_to_string(
&self,
@@ -374,10 +537,37 @@ impl ResolveToString<ListDockerNetworks, User> for State {
}
}
impl ResolveToString<ListDockerContainers, User> for State {
impl Resolve<InspectDockerNetwork, User> for State {
async fn resolve(
&self,
InspectDockerNetwork { server, network }: InspectDockerNetwork,
user: User,
) -> anyhow::Result<Network> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect network: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectNetwork { name: network })
.await
}
}
impl ResolveToString<ListDockerImages, User> for State {
async fn resolve_to_string(
&self,
ListDockerContainers { server }: ListDockerContainers,
ListDockerImages { server }: ListDockerImages,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
@@ -389,8 +579,8 @@ impl ResolveToString<ListDockerContainers, User> for State {
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(containers) = &cache.containers {
serde_json::to_string(containers)
if let Some(images) = &cache.images {
serde_json::to_string(images)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
@@ -398,6 +588,111 @@ impl ResolveToString<ListDockerContainers, User> for State {
}
}
impl Resolve<InspectDockerImage, User> for State {
async fn resolve(
&self,
InspectDockerImage { server, image }: InspectDockerImage,
user: User,
) -> anyhow::Result<Image> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect image: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectImage { name: image })
.await
}
}
impl Resolve<ListDockerImageHistory, User> for State {
async fn resolve(
&self,
ListDockerImageHistory { server, image }: ListDockerImageHistory,
user: User,
) -> anyhow::Result<Vec<ImageHistoryResponseItem>> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot get image history: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(ImageHistory { name: image })
.await
}
}
impl ResolveToString<ListDockerVolumes, User> for State {
async fn resolve_to_string(
&self,
ListDockerVolumes { server }: ListDockerVolumes,
user: User,
) -> anyhow::Result<String> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if let Some(volumes) = &cache.volumes {
serde_json::to_string(volumes)
.context("failed to serialize response")
} else {
Ok(String::from("[]"))
}
}
}
impl Resolve<InspectDockerVolume, User> for State {
async fn resolve(
&self,
InspectDockerVolume { server, volume }: InspectDockerVolume,
user: User,
) -> anyhow::Result<Volume> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Read,
)
.await?;
let cache = server_status_cache()
.get_or_insert_default(&server.id)
.await;
if cache.state != ServerState::Ok {
return Err(anyhow!(
"Cannot inspect volume: server is {:?}",
cache.state
));
}
periphery_client(&server)?
.request(InspectVolume { name: volume })
.await
}
}
impl ResolveToString<ListComposeProjects, User> for State {
async fn resolve_to_string(
&self,

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use mongo_indexed::Document;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
permission::PermissionLevel, server_template::ServerTemplate,

View File

@@ -1,7 +1,7 @@
use std::collections::HashSet;
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::read::*,
entities::{
config::core::CoreConfig,

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use mongo_indexed::doc;
use monitor_client::{
use komodo_client::{
api::read::{GetTag, ListTags},
entities::{tag::Tag, user::User},
};

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::{
execute::Execution,
read::{
@@ -30,8 +30,8 @@ use monitor_client::{
toml::{
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
},
update::ResourceTarget,
user::User,
ResourceTarget,
},
};
use mungos::find::find_collect;
@@ -42,7 +42,7 @@ use serde_json::Value;
use crate::{
helpers::query::get_user_user_group_ids,
resource::{self, MonitorResource},
resource::{self, KomodoResource},
state::{db_client, State},
};
@@ -355,7 +355,15 @@ impl Resolve<ExportResourcesToToml, User> for State {
res.variables =
find_collect(&db_client().await.variables, None, None)
.await
.context("failed to get variables from db")?;
.context("failed to get variables from db")?
.into_iter()
.map(|mut variable| {
if !user.admin && variable.is_secret {
variable.value = "#".repeat(variable.value.len())
}
variable
})
.collect();
}
let toml = serialize_resources_toml(&res)
@@ -399,7 +407,7 @@ async fn add_procedure(
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::StartContainer(exec) => {
Execution::StartDeployment(exec) => {
exec.deployment.clone_from(
names
.deployments
@@ -407,7 +415,7 @@ async fn add_procedure(
.unwrap_or(&String::new()),
)
}
Execution::RestartContainer(exec) => {
Execution::RestartDeployment(exec) => {
exec.deployment.clone_from(
names
.deployments
@@ -415,7 +423,7 @@ async fn add_procedure(
.unwrap_or(&String::new()),
)
}
Execution::PauseContainer(exec) => {
Execution::PauseDeployment(exec) => {
exec.deployment.clone_from(
names
.deployments
@@ -423,7 +431,7 @@ async fn add_procedure(
.unwrap_or(&String::new()),
)
}
Execution::UnpauseContainer(exec) => {
Execution::UnpauseDeployment(exec) => {
exec.deployment.clone_from(
names
.deployments
@@ -431,13 +439,15 @@ async fn add_procedure(
.unwrap_or(&String::new()),
)
}
Execution::StopContainer(exec) => exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
),
Execution::RemoveContainer(exec) => {
Execution::StopDeployment(exec) => {
exec.deployment.clone_from(
names
.deployments
.get(&exec.deployment)
.unwrap_or(&String::new()),
)
}
Execution::DestroyDeployment(exec) => {
exec.deployment.clone_from(
names
.deployments
@@ -457,16 +467,69 @@ async fn add_procedure(
Execution::CancelRepoBuild(exec) => exec.repo.clone_from(
names.repos.get(&exec.repo).unwrap_or(&String::new()),
),
Execution::StartContainer(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::RestartContainer(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PauseContainer(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::UnpauseContainer(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::StopContainer(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::DestroyContainer(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::StartAllContainers(exec) => {
exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
)
}
Execution::RestartAllContainers(exec) => {
exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
)
}
Execution::PauseAllContainers(exec) => {
exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
)
}
Execution::UnpauseAllContainers(exec) => {
exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
)
}
Execution::StopAllContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneContainers(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::DeleteNetwork(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneNetworks(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::DeleteImage(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneImages(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneContainers(exec) => exec.server.clone_from(
Execution::DeleteVolume(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneVolumes(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::PruneSystem(exec) => exec.server.clone_from(
names.servers.get(&exec.server).unwrap_or(&String::new()),
),
Execution::RunSync(exec) => exec.sync.clone_from(
@@ -679,7 +742,7 @@ async fn add_user_groups(
Ok(())
}
fn convert_resource<R: MonitorResource>(
fn convert_resource<R: KomodoResource>(
resource: Resource<R::Config, R::Info>,
tag_names: &HashMap<String, String>,
) -> ResourceToml<R::PartialConfig> {

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
entities::{
alerter::Alerter,
@@ -15,8 +15,9 @@ use monitor_client::{
server_template::ServerTemplate,
stack::Stack,
sync::ResourceSync,
update::{ResourceTarget, Update, UpdateListItem},
update::{Update, UpdateListItem},
user::User,
ResourceTarget,
},
};
use mungos::{

View File

@@ -1,5 +1,5 @@
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::read::{
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
ListApiKeys, ListApiKeysForServiceUser,

View File

@@ -1,7 +1,7 @@
use std::str::FromStr;
use anyhow::Context;
use monitor_client::{
use komodo_client::{
api::read::{
GetUserGroup, GetUserGroupResponse, ListUserGroups,
ListUserGroupsResponse,

View File

@@ -1,6 +1,6 @@
use anyhow::Context;
use mongo_indexed::doc;
use monitor_client::{
use komodo_client::{
api::read::{
GetVariable, GetVariableResponse, ListVariables,
ListVariablesResponse,
@@ -19,9 +19,14 @@ impl Resolve<GetVariable, User> for State {
async fn resolve(
&self,
GetVariable { name }: GetVariable,
_: User,
user: User,
) -> anyhow::Result<GetVariableResponse> {
get_variable(&name).await
let mut variable = get_variable(&name).await?;
if !variable.is_secret || user.admin {
return Ok(variable);
}
variable.value = "#".repeat(variable.value.len());
Ok(variable)
}
}
@@ -29,14 +34,27 @@ impl Resolve<ListVariables, User> for State {
async fn resolve(
&self,
ListVariables {}: ListVariables,
_: User,
user: User,
) -> anyhow::Result<ListVariablesResponse> {
find_collect(
let variables = find_collect(
&db_client().await.variables,
None,
FindOptions::builder().sort(doc! { "name": 1 }).build(),
)
.await
.context("failed to query db for variables")
.context("failed to query db for variables")?;
if user.admin {
return Ok(variables);
}
let variables = variables
.into_iter()
.map(|mut variable| {
if variable.is_secret {
variable.value = "#".repeat(variable.value.len());
}
variable
})
.collect();
Ok(variables)
}
}

View File

@@ -3,16 +3,16 @@ use std::{collections::VecDeque, time::Instant};
use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Json, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use mongo_indexed::doc;
use monitor_client::{
use komodo_client::{
api::user::{
CreateApiKey, CreateApiKeyResponse, DeleteApiKey,
DeleteApiKeyResponse, PushRecentlyViewed,
PushRecentlyViewedResponse, SetLastSeenUpdate,
SetLastSeenUpdateResponse,
},
entities::{api_key::ApiKey, monitor_timestamp, user::User},
entities::{api_key::ApiKey, komodo_timestamp, user::User},
};
use mongo_indexed::doc;
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
use resolver_api::{derive::Resolver, Resolve, Resolver};
use serde::{Deserialize, Serialize};
@@ -132,7 +132,7 @@ impl Resolve<SetLastSeenUpdate, User> for State {
&db_client().await.users,
&user.id,
mungos::update::Update::Set(doc! {
"last_update_view": monitor_timestamp()
"last_update_view": komodo_timestamp()
}),
None,
)
@@ -168,7 +168,7 @@ impl Resolve<CreateApiKey, User> for State {
key: key.clone(),
secret: secret_hash,
user_id: user.id.clone(),
created_at: monitor_timestamp(),
created_at: komodo_timestamp(),
expires,
};
db_client()

View File

@@ -1,4 +1,4 @@
use monitor_client::{
use komodo_client::{
api::write::{
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
},

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use mongo_indexed::doc;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
build::{Build, BuildInfo, PartialBuildConfig},
@@ -131,6 +131,7 @@ impl Resolve<RefreshBuildCache, User> for State {
&[],
"",
None,
&[],
)
.await
.context("failed to clone build repo")?;

View File

@@ -1,4 +1,4 @@
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
builder::Builder, permission::PermissionLevel, user::User,

View File

@@ -1,12 +1,12 @@
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
deployment::{Deployment, DeploymentState},
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
server::Server,
to_monitor_name,
to_komodo_name,
update::Update,
user::User,
Operation,
@@ -102,7 +102,7 @@ impl Resolve<RenameDeployment, User> for State {
let _action_guard =
action_state.update(|state| state.renaming = true)?;
let name = to_monitor_name(&name);
let name = to_komodo_name(&name);
let container_state = get_deployment_state(&deployment).await?;
@@ -119,7 +119,7 @@ impl Resolve<RenameDeployment, User> for State {
&db_client().await.deployments,
&deployment.id,
mungos::update::Update::Set(
doc! { "name": &name, "updated_at": monitor_timestamp() },
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
)

View File

@@ -1,11 +1,11 @@
use anyhow::anyhow;
use monitor_client::{
use komodo_client::{
api::write::{UpdateDescription, UpdateDescriptionResponse},
entities::{
alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, update::ResourceTarget, user::User,
sync::ResourceSync, user::User, ResourceTarget,
},
};
use resolver_api::Resolve;

View File

@@ -4,7 +4,7 @@ use anyhow::{anyhow, Context};
use axum::{middleware, routing::post, Extension, Router};
use axum_extra::{headers::ContentType, TypedHeader};
use derive_variants::{EnumVariants, ExtractVariant};
use monitor_client::{api::write::*, entities::user::User};
use komodo_client::{api::write::*, entities::user::User};
use resolver_api::{derive::Resolver, Resolver};
use serde::{Deserialize, Serialize};
use serror::Json;
@@ -68,7 +68,6 @@ pub enum WriteRequest {
UpdateServer(UpdateServer),
RenameServer(RenameServer),
CreateNetwork(CreateNetwork),
DeleteNetwork(DeleteNetwork),
// ==== DEPLOYMENT ====
CreateDeployment(CreateDeployment),
@@ -148,6 +147,7 @@ pub enum WriteRequest {
CreateVariable(CreateVariable),
UpdateVariableValue(UpdateVariableValue),
UpdateVariableDescription(UpdateVariableDescription),
UpdateVariableIsSecret(UpdateVariableIsSecret),
DeleteVariable(DeleteVariable),
// ==== PROVIDERS ====

View File

@@ -1,7 +1,7 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::write::{
UpdatePermissionOnResourceType,
UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget,
@@ -10,8 +10,8 @@ use monitor_client::{
},
entities::{
permission::{UserTarget, UserTargetVariant},
update::{ResourceTarget, ResourceTargetVariant},
user::User,
ResourceTarget, ResourceTargetVariant,
},
};
use mungos::{

View File

@@ -1,4 +1,4 @@
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
permission::PermissionLevel, procedure::Procedure, user::User,

View File

@@ -1,11 +1,10 @@
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
provider::{DockerRegistryAccount, GitProviderAccount},
update::ResourceTarget,
user::User,
Operation,
Operation, ResourceTarget,
},
};
use mungos::{

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use mongo_indexed::doc;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
config::core::CoreConfig,
@@ -134,6 +134,7 @@ impl Resolve<RefreshRepoCache, User> for State {
&[],
"",
None,
&[],
)
.await
.context("failed to clone repo (the resource) repo")?;

View File

@@ -1,9 +1,9 @@
use anyhow::Context;
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
server::Server,
update::{Update, UpdateStatus},
@@ -73,7 +73,7 @@ impl Resolve<RenameServer, User> for State {
let mut update =
make_update(&server, Operation::RenameServer, &user);
update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": monitor_timestamp() }), None)
update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None)
.await
.context("failed to update server on db. this name may already be taken.")?;
update.push_simple_log(
@@ -124,42 +124,3 @@ impl Resolve<CreateNetwork, User> for State {
Ok(update)
}
}
impl Resolve<DeleteNetwork, User> for State {
#[instrument(name = "DeleteNetwork", skip(self, user))]
async fn resolve(
&self,
DeleteNetwork { server, name }: DeleteNetwork,
user: User,
) -> anyhow::Result<Update> {
let server = resource::get_check_permissions::<Server>(
&server,
&user,
PermissionLevel::Write,
)
.await?;
let periphery = periphery_client(&server)?;
let mut update =
make_update(&server, Operation::DeleteNetwork, &user);
update.status = UpdateStatus::InProgress;
update.id = add_update(update.clone()).await?;
match periphery
.request(api::network::DeleteNetwork { name })
.await
{
Ok(log) => update.logs.push(log),
Err(e) => update.push_error_log(
"delete network",
format_serror(&e.context("failed to delete network").into()),
),
};
update.finalize();
update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -1,4 +1,4 @@
use monitor_client::{
use komodo_client::{
api::write::{
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
UpdateServerTemplate,

View File

@@ -1,7 +1,7 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::{
user::CreateApiKey,
write::{
@@ -13,7 +13,7 @@ use monitor_client::{
},
},
entities::{
monitor_timestamp,
komodo_timestamp,
user::{User, UserConfig},
},
};
@@ -53,7 +53,7 @@ impl Resolve<CreateServiceUser, User> for State {
last_update_view: 0,
recents: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
updated_at: komodo_timestamp(),
};
user.id = db_client()
.await

View File

@@ -1,10 +1,10 @@
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
config::core::CoreConfig,
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
server::ServerState,
stack::{ComposeContents, PartialStackConfig, Stack, StackInfo},
@@ -29,7 +29,7 @@ use crate::{
config::core_config,
helpers::{
periphery_client,
query::get_server_with_status,
query::get_server_with_state,
stack::{
remote::get_remote_compose_contents,
services::extract_services_into_res,
@@ -112,7 +112,7 @@ impl Resolve<RenameStack, User> for State {
&db_client().await.stacks,
&stack.id,
mungos::update::Update::Set(
doc! { "name": &name, "updated_at": monitor_timestamp() },
doc! { "name": &name, "updated_at": komodo_timestamp() },
),
None,
)
@@ -177,7 +177,7 @@ impl Resolve<RefreshStackCache, User> for State {
(vec![], None, None, None, None)
} else {
let (server, status) =
get_server_with_status(&stack.config.server_id).await?;
get_server_with_state(&stack.config.server_id).await?;
if status != ServerState::Ok {
(vec![], None, None, None, None)
} else {

View File

@@ -2,21 +2,21 @@ use std::collections::HashMap;
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::{
use komodo_client::{
api::write::*,
entities::{
self,
alert::{Alert, AlertData},
alert::{Alert, AlertData, SeverityLevel},
alerter::Alerter,
build::Build,
builder::Builder,
config::core::CoreConfig,
deployment::Deployment,
monitor_timestamp,
komodo_timestamp,
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
server::{stats::SeverityLevel, Server},
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::{
@@ -24,7 +24,7 @@ use monitor_client::{
PendingSyncUpdatesData, PendingSyncUpdatesDataErr,
PendingSyncUpdatesDataOk, ResourceSync,
},
update::ResourceTarget,
ResourceTarget,
user::User,
NoData,
},
@@ -328,7 +328,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
(None, true) => {
let alert = Alert {
id: Default::default(),
ts: monitor_timestamp(),
ts: komodo_timestamp(),
resolved: false,
level: SeverityLevel::Ok,
target: ResourceTarget::ResourceSync(id.clone()),
@@ -351,7 +351,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
doc! {
"$set": {
"resolved": true,
"resolved_ts": monitor_timestamp()
"resolved_ts": komodo_timestamp()
}
},
None,

View File

@@ -1,7 +1,7 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::write::{
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
UpdateTagsOnResourceResponse,
@@ -11,7 +11,7 @@ use monitor_client::{
deployment::Deployment, permission::PermissionLevel,
procedure::Procedure, repo::Repo, server::Server,
server_template::ServerTemplate, stack::Stack,
sync::ResourceSync, tag::Tag, update::ResourceTarget, user::User,
sync::ResourceSync, tag::Tag, user::User, ResourceTarget,
},
};
use mungos::{
@@ -59,6 +59,7 @@ impl Resolve<CreateTag, User> for State {
}
impl Resolve<RenameTag, User> for State {
#[instrument(name = "RenameTag", skip(self, user))]
async fn resolve(
&self,
RenameTag { id, name }: RenameTag,

View File

@@ -1,12 +1,12 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::write::{
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup,
},
entities::{monitor_timestamp, user::User, user_group::UserGroup},
entities::{komodo_timestamp, user::User, user_group::UserGroup},
};
use mungos::{
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
@@ -30,7 +30,7 @@ impl Resolve<CreateUserGroup, User> for State {
id: Default::default(),
users: Default::default(),
all: Default::default(),
updated_at: monitor_timestamp(),
updated_at: komodo_timestamp(),
name,
};
let db = db_client().await;

View File

@@ -1,13 +1,14 @@
use anyhow::{anyhow, Context};
use monitor_client::{
use komodo_client::{
api::write::{
CreateVariable, CreateVariableResponse, DeleteVariable,
DeleteVariableResponse, UpdateVariableDescription,
UpdateVariableDescriptionResponse, UpdateVariableValue,
UpdateVariableDescriptionResponse, UpdateVariableIsSecret,
UpdateVariableIsSecretResponse, UpdateVariableValue,
UpdateVariableValueResponse,
},
entities::{
update::ResourceTarget, user::User, variable::Variable, Operation,
user::User, variable::Variable, Operation, ResourceTarget,
},
};
use mungos::mongodb::bson::doc;
@@ -22,12 +23,14 @@ use crate::{
};
impl Resolve<CreateVariable, User> for State {
#[instrument(name = "CreateVariable", skip(self, user, value))]
async fn resolve(
&self,
CreateVariable {
name,
value,
description,
is_secret,
}: CreateVariable,
user: User,
) -> anyhow::Result<CreateVariableResponse> {
@@ -39,6 +42,7 @@ impl Resolve<CreateVariable, User> for State {
name,
value,
description,
is_secret,
};
db_client()
@@ -65,6 +69,7 @@ impl Resolve<CreateVariable, User> for State {
}
impl Resolve<UpdateVariableValue, User> for State {
#[instrument(name = "UpdateVariableValue", skip(self, user, value))]
async fn resolve(
&self,
UpdateVariableValue { name, value }: UpdateVariableValue,
@@ -96,13 +101,19 @@ impl Resolve<UpdateVariableValue, User> for State {
&user,
);
    update.push_simple_log(
      "update variable value",
      format!(
        "<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
        variable.value
      ),
    );
    let log = if variable.is_secret {
      format!(
        "<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
        variable.value.replace(|_| true, "#")
      )
    } else {
      format!(
        "<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
        variable.value
      )
    };
    update.push_simple_log("update variable value", log);
update.finalize();
add_update(update).await?;
@@ -112,6 +123,7 @@ impl Resolve<UpdateVariableValue, User> for State {
}
impl Resolve<UpdateVariableDescription, User> for State {
#[instrument(name = "UpdateVariableDescription", skip(self, user))]
async fn resolve(
&self,
UpdateVariableDescription { name, description }: UpdateVariableDescription,
@@ -133,6 +145,29 @@ impl Resolve<UpdateVariableDescription, User> for State {
}
}
impl Resolve<UpdateVariableIsSecret, User> for State {
#[instrument(name = "UpdateVariableIsSecret", skip(self, user))]
async fn resolve(
&self,
UpdateVariableIsSecret { name, is_secret }: UpdateVariableIsSecret,
user: User,
) -> anyhow::Result<UpdateVariableIsSecretResponse> {
if !user.admin {
return Err(anyhow!("only admins can update variables"));
}
db_client()
.await
.variables
.update_one(
doc! { "name": &name },
doc! { "$set": { "is_secret": is_secret } },
)
.await
.context("failed to update variable is secret on db")?;
get_variable(&name).await
}
}
impl Resolve<DeleteVariable, User> for State {
async fn resolve(
&self,

View File

@@ -1,7 +1,7 @@
use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use monitor_client::entities::config::core::{
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;

View File

@@ -3,8 +3,8 @@ use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::{
monitor_timestamp,
use komodo_client::entities::{
komodo_timestamp,
user::{User, UserConfig},
};
use mungos::mongodb::bson::doc;
@@ -75,7 +75,7 @@ async fn callback(
.generate(user.id)
.context("failed to generate jwt")?,
None => {
let ts = monitor_timestamp();
let ts = komodo_timestamp();
let no_users_exist =
db_client.users.find_one(Document::new()).await?.is_none();
let user = User {

View File

@@ -2,7 +2,7 @@ use std::sync::OnceLock;
use anyhow::{anyhow, Context};
use jwt::Token;
use monitor_client::entities::config::core::{
use komodo_client::entities::config::core::{
CoreConfig, OauthCredentials,
};
use reqwest::StatusCode;
@@ -73,7 +73,7 @@ impl GoogleOauthClient {
client_id: id.clone(),
client_secret: secret.clone(),
redirect_uri: format!("{host}/auth/google/callback"),
user_agent: String::from("monitor"),
user_agent: String::from("komodo"),
states: Default::default(),
scopes,
}

View File

@@ -4,7 +4,7 @@ use axum::{
extract::Query, response::Redirect, routing::get, Router,
};
use mongo_indexed::Document;
use monitor_client::entities::user::{User, UserConfig};
use komodo_client::entities::user::{User, UserConfig};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
use serde::Deserialize;

View File

@@ -6,7 +6,7 @@ use async_timing_util::{
};
use hmac::{Hmac, Mac};
use jwt::SignWithKey;
use monitor_client::entities::config::core::CoreConfig;
use komodo_client::entities::config::core::CoreConfig;
use mungos::mongodb::bson::doc;
use serde::{Deserialize, Serialize};
use sha2::Sha256;

View File

@@ -4,7 +4,7 @@ use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::http::HeaderMap;
use mongo_indexed::Document;
use monitor_client::{
use komodo_client::{
api::auth::{
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
LoginLocalUserResponse,

View File

@@ -5,7 +5,7 @@ use axum::{
extract::Request, http::HeaderMap, middleware::Next,
response::Response,
};
use monitor_client::entities::{monitor_timestamp, user::User};
use komodo_client::entities::{komodo_timestamp, user::User};
use mungos::mongodb::bson::doc;
use reqwest::StatusCode;
use serde::Deserialize;
@@ -122,7 +122,7 @@ pub async fn auth_api_key_get_user_id(
.await
.context("failed to query db")?
.context("no api key matching key")?;
if key.expires != 0 && key.expires < monitor_timestamp() {
if key.expires != 0 && key.expires < komodo_timestamp() {
return Err(anyhow!("api key expired"));
}
if bcrypt::verify(secret, &key.secret)

View File

@@ -12,12 +12,11 @@ use aws_sdk_ec2::{
Client,
};
use base64::Engine;
use monitor_client::entities::{
alert::{Alert, AlertData},
monitor_timestamp,
server::stats::SeverityLevel,
use komodo_client::entities::{
alert::{Alert, AlertData, SeverityLevel},
komodo_timestamp,
server_template::aws::AwsServerTemplateConfig,
update::ResourceTarget,
ResourceTarget,
};
use crate::{config::core_config, helpers::alert::send_alerts};
@@ -171,7 +170,7 @@ pub async fn terminate_ec2_instance_with_retry(
error!("failed to terminate aws instance {instance_id}.");
let alert = Alert {
id: Default::default(),
ts: monitor_timestamp(),
ts: komodo_timestamp(),
resolved: false,
level: SeverityLevel::Critical,
target: ResourceTarget::system(),

View File

@@ -162,6 +162,8 @@ pub enum HetznerLocation {
Ashburn,
#[serde(rename = "hil")]
Hillsboro,
#[serde(rename = "sin")]
Singapore,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
@@ -176,6 +178,8 @@ pub enum HetznerDatacenter {
AshburnDc1,
#[serde(rename = "hil-dc1")]
HillsboroDc1,
#[serde(rename = "sin-dc1")]
SingaporeDc1,
}
impl From<HetznerDatacenter> for HetznerLocation {
@@ -188,6 +192,7 @@ impl From<HetznerDatacenter> for HetznerLocation {
}
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
HetznerDatacenter::SingaporeDc1 => HetznerLocation::Singapore,
}
}
}

View File

@@ -5,7 +5,7 @@ use std::{
use anyhow::{anyhow, Context};
use futures::future::join_all;
use monitor_client::entities::server_template::hetzner::{
use komodo_client::entities::server_template::hetzner::{
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
HetznerVolumeFormat,
};
@@ -209,6 +209,9 @@ fn hetzner_datacenter(
HetznerDatacenter::HillsboroDc1 => {
common::HetznerDatacenter::HillsboroDc1
}
HetznerDatacenter::SingaporeDc1 => {
common::HetznerDatacenter::SingaporeDc1
}
}
}

View File

@@ -2,7 +2,7 @@ use std::sync::OnceLock;
use anyhow::Context;
use merge_config_files::parse_config_file;
use monitor_client::entities::{
use komodo_client::entities::{
config::core::{
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
GithubWebhookAppInstallationConfig, HetznerCredentials,
@@ -16,7 +16,7 @@ pub fn frontend_path() -> &'static String {
#[derive(Deserialize)]
struct FrontendEnv {
#[serde(default = "default_frontend_path")]
monitor_frontend_path: String,
komodo_frontend_path: String,
}
fn default_frontend_path() -> String {
@@ -26,11 +26,11 @@ pub fn frontend_path() -> &'static String {
static FRONTEND_PATH: OnceLock<String> = OnceLock::new();
FRONTEND_PATH.get_or_init(|| {
let FrontendEnv {
monitor_frontend_path,
komodo_frontend_path,
} = envy::from_env()
.context("failed to parse FrontendEnv")
.unwrap();
monitor_frontend_path
komodo_frontend_path
})
}
@@ -44,16 +44,16 @@ pub fn core_config() -> &'static CoreConfig {
panic!("{e:#?}");
}
};
let config_path = &env.monitor_config_path;
let config_path = &env.komodo_config_path;
let config =
parse_config_file::<CoreConfig>(config_path.as_str())
.unwrap_or_else(|e| {
panic!("failed at parsing config at {config_path} | {e:#}")
});
let installations = match (env.monitor_github_webhook_app_installations_ids, env.monitor_github_webhook_app_installations_namespaces) {
let installations = match (env.komodo_github_webhook_app_installations_ids, env.komodo_github_webhook_app_installations_namespaces) {
(Some(ids), Some(namespaces)) => {
if ids.len() != namespaces.len() {
panic!("MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
panic!("KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
}
ids
.into_iter()
@@ -65,7 +65,7 @@ pub fn core_config() -> &'static CoreConfig {
.collect()
},
(Some(_), None) | (None, Some(_)) => {
panic!("Got only one of MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or MONITOR_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
panic!("Got only one of KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
}
(None, None) => {
config.github_webhook_app.installations
@@ -73,129 +73,129 @@ pub fn core_config() -> &'static CoreConfig {
};
// recreating CoreConfig here makes sure we apply all env overrides.
CoreConfig {
title: env.monitor_title.unwrap_or(config.title),
host: env.monitor_host.unwrap_or(config.host),
port: env.monitor_port.unwrap_or(config.port),
passkey: env.monitor_passkey.unwrap_or(config.passkey),
ensure_server: env.monitor_ensure_server.unwrap_or(config.ensure_server),
jwt_secret: env.monitor_jwt_secret.unwrap_or(config.jwt_secret),
title: env.komodo_title.unwrap_or(config.title),
host: env.komodo_host.unwrap_or(config.host),
port: env.komodo_port.unwrap_or(config.port),
passkey: env.komodo_passkey.unwrap_or(config.passkey),
ensure_server: env.komodo_ensure_server.unwrap_or(config.ensure_server),
jwt_secret: env.komodo_jwt_secret.unwrap_or(config.jwt_secret),
jwt_ttl: env
.monitor_jwt_ttl
.komodo_jwt_ttl
.unwrap_or(config.jwt_ttl),
repo_directory: env
.monitor_repo_directory
.komodo_repo_directory
.map(|dir|
dir.parse()
.context("failed to parse env MONITOR_REPO_DIRECTORY as valid path").unwrap())
.context("failed to parse env komodo_REPO_DIRECTORY as valid path").unwrap())
.unwrap_or(config.repo_directory),
stack_poll_interval: env
.monitor_stack_poll_interval
.komodo_stack_poll_interval
.unwrap_or(config.stack_poll_interval),
sync_poll_interval: env
.monitor_sync_poll_interval
.komodo_sync_poll_interval
.unwrap_or(config.sync_poll_interval),
build_poll_interval: env
.monitor_build_poll_interval
.komodo_build_poll_interval
.unwrap_or(config.build_poll_interval),
repo_poll_interval: env
.monitor_repo_poll_interval
.komodo_repo_poll_interval
.unwrap_or(config.repo_poll_interval),
monitoring_interval: env
.monitor_monitoring_interval
.komodo_monitoring_interval
.unwrap_or(config.monitoring_interval),
keep_stats_for_days: env
.monitor_keep_stats_for_days
.komodo_keep_stats_for_days
.unwrap_or(config.keep_stats_for_days),
keep_alerts_for_days: env
.monitor_keep_alerts_for_days
.komodo_keep_alerts_for_days
.unwrap_or(config.keep_alerts_for_days),
webhook_secret: env
.monitor_webhook_secret
.komodo_webhook_secret
.unwrap_or(config.webhook_secret),
webhook_base_url: env
.monitor_webhook_base_url
.komodo_webhook_base_url
.or(config.webhook_base_url),
transparent_mode: env
.monitor_transparent_mode
.komodo_transparent_mode
.unwrap_or(config.transparent_mode),
ui_write_disabled: env
.monitor_ui_write_disabled
.komodo_ui_write_disabled
.unwrap_or(config.ui_write_disabled),
enable_new_users: env.monitor_enable_new_users
enable_new_users: env.komodo_enable_new_users
.unwrap_or(config.enable_new_users),
local_auth: env.monitor_local_auth.unwrap_or(config.local_auth),
local_auth: env.komodo_local_auth.unwrap_or(config.local_auth),
google_oauth: OauthCredentials {
enabled: env
.monitor_google_oauth_enabled
.komodo_google_oauth_enabled
.unwrap_or(config.google_oauth.enabled),
id: env
.monitor_google_oauth_id
.komodo_google_oauth_id
.unwrap_or(config.google_oauth.id),
secret: env
.monitor_google_oauth_secret
.komodo_google_oauth_secret
.unwrap_or(config.google_oauth.secret),
},
github_oauth: OauthCredentials {
enabled: env
.monitor_github_oauth_enabled
.komodo_github_oauth_enabled
.unwrap_or(config.github_oauth.enabled),
id: env
.monitor_github_oauth_id
.komodo_github_oauth_id
.unwrap_or(config.github_oauth.id),
secret: env
.monitor_github_oauth_secret
.komodo_github_oauth_secret
.unwrap_or(config.github_oauth.secret),
},
github_webhook_app: GithubWebhookAppConfig {
app_id: env
.monitor_github_webhook_app_app_id
.komodo_github_webhook_app_app_id
.unwrap_or(config.github_webhook_app.app_id),
pk_path: env
.monitor_github_webhook_app_pk_path
.komodo_github_webhook_app_pk_path
.unwrap_or(config.github_webhook_app.pk_path),
installations,
},
aws: AwsCredentials {
access_key_id: env
.monitor_aws_access_key_id
.komodo_aws_access_key_id
.unwrap_or(config.aws.access_key_id),
secret_access_key: env
.monitor_aws_secret_access_key
.komodo_aws_secret_access_key
.unwrap_or(config.aws.secret_access_key),
},
hetzner: HetznerCredentials {
token: env
.monitor_hetzner_token
.komodo_hetzner_token
.unwrap_or(config.hetzner.token),
},
mongo: MongoConfig {
uri: env.monitor_mongo_uri.or(config.mongo.uri),
address: env.monitor_mongo_address.or(config.mongo.address),
uri: env.komodo_mongo_uri.or(config.mongo.uri),
address: env.komodo_mongo_address.or(config.mongo.address),
username: env
.monitor_mongo_username
.komodo_mongo_username
.or(config.mongo.username),
password: env
.monitor_mongo_password
.komodo_mongo_password
.or(config.mongo.password),
app_name: env
.monitor_mongo_app_name
.komodo_mongo_app_name
.unwrap_or(config.mongo.app_name),
db_name: env
.monitor_mongo_db_name
.komodo_mongo_db_name
.unwrap_or(config.mongo.db_name),
},
logging: LogConfig {
level: env
.monitor_logging_level
.komodo_logging_level
.unwrap_or(config.logging.level),
stdio: env
.monitor_logging_stdio
.komodo_logging_stdio
.unwrap_or(config.logging.stdio),
otlp_endpoint: env
.monitor_logging_otlp_endpoint
.komodo_logging_otlp_endpoint
.or(config.logging.otlp_endpoint),
opentelemetry_service_name: env
.monitor_logging_opentelemetry_service_name
.komodo_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name),
},
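
Every renamed field above follows the same precedence rule: a KOMODO_* env var, when set, overrides the file value. A minimal sketch of the pattern with stand-in structs (the real Env is deserialized from the environment by envy):

struct Env {
  komodo_port: Option<u16>,
}
struct FileConfig {
  port: u16,
}

fn effective_port(env: &Env, file: &FileConfig) -> u16 {
  // env override wins when present, otherwise fall back to the file value
  env.komodo_port.unwrap_or(file.port)
}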

View File

@@ -1,5 +1,5 @@
use mongo_indexed::{create_index, create_unique_index};
use monitor_client::entities::{
use komodo_client::entities::{
alert::Alert,
alerter::Alerter,
api_key::ApiKey,
@@ -11,9 +11,10 @@ use monitor_client::entities::{
procedure::Procedure,
provider::{DockerRegistryAccount, GitProviderAccount},
repo::Repo,
server::{stats::SystemStatsRecord, Server},
server::Server,
server_template::ServerTemplate,
stack::Stack,
stats::SystemStatsRecord,
sync::ResourceSync,
tag::Tag,
update::Update,

View File

@@ -1,7 +1,7 @@
use std::sync::{Arc, Mutex};
use anyhow::anyhow;
use monitor_client::{
use komodo_client::{
busy::Busy,
entities::{
build::BuildActionState, deployment::DeploymentActionState,

View File

@@ -1,13 +1,12 @@
use anyhow::{anyhow, Context};
use derive_variants::ExtractVariant;
use futures::future::join_all;
use monitor_client::entities::{
alert::{Alert, AlertData},
use komodo_client::entities::{
alert::{Alert, AlertData, SeverityLevel},
alerter::*,
deployment::DeploymentState,
server::stats::SeverityLevel,
stack::StackState,
update::ResourceTargetVariant,
ResourceTargetVariant,
};
use mungos::{find::find_collect, mongodb::bson::doc};
use slack::types::Block;

View File

@@ -1,5 +1,5 @@
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
use komodo_client::{
api::write::RefreshBuildCache, entities::user::build_user,
};
use mungos::find::find_collect;

View File

@@ -2,9 +2,9 @@ use std::time::Duration;
use anyhow::{anyhow, Context};
use formatting::muted;
use monitor_client::entities::{
use komodo_client::entities::{
builder::{AwsBuilderConfig, Builder, BuilderConfig},
monitor_timestamp,
komodo_timestamp,
server::Server,
server_template::aws::AwsServerTemplateConfig,
update::{Log, Update},
@@ -68,7 +68,7 @@ async fn get_aws_builder(
config: AwsBuilderConfig,
update: &mut Update,
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
let start_create_ts = monitor_timestamp();
let start_create_ts = komodo_timestamp();
let version = version.map(|v| format!("-v{v}")).unwrap_or_default();
let instance_name = format!("BUILDER-{resource_name}{version}");
@@ -85,7 +85,7 @@ async fn get_aws_builder(
success: true,
stdout: start_aws_builder_log(&instance_id, &ip, &config),
start_ts: start_create_ts,
end_ts: monitor_timestamp(),
end_ts: komodo_timestamp(),
..Default::default()
};
@@ -97,7 +97,7 @@ async fn get_aws_builder(
let periphery =
PeripheryClient::new(&periphery_address, &core_config().passkey);
let start_connect_ts = monitor_timestamp();
let start_connect_ts = komodo_timestamp();
let mut res = Ok(GetVersionResponse {
version: String::new(),
});
@@ -115,7 +115,7 @@ async fn get_aws_builder(
version
),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
end_ts: komodo_timestamp(),
..Default::default()
};
update.logs.push(connect_log);

View File

@@ -1,6 +1,6 @@
use std::{collections::HashMap, hash::Hash};
use monitor_client::busy::Busy;
use komodo_client::busy::Busy;
use tokio::sync::RwLock;
#[derive(Default)]

View File

@@ -1,6 +1,6 @@
use std::sync::OnceLock;
use monitor_client::entities::update::{Update, UpdateListItem};
use komodo_client::entities::update::{Update, UpdateListItem};
use tokio::sync::{broadcast, Mutex};
/// A channel sending (build_id, update_id)

View File

@@ -0,0 +1,222 @@
use std::collections::HashSet;
use anyhow::Context;
use komodo_client::entities::{
update::Update, EnvironmentVar, SystemCommand,
};
use super::query::VariablesAndSecrets;
pub fn interpolate_variables_secrets_into_environment(
VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
environment: &mut Vec<EnvironmentVar>,
global_replacers: &mut HashSet<(String, String)>,
secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
for env in environment {
if env.value.is_empty() {
continue;
}
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&env.value,
variables,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate global variables into env var '{}'",
env.variable
)
})?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate core secrets into env var '{}'",
env.variable
)
})?;
secret_replacers.extend(more_replacers);
// set env value with the result
env.value = res;
}
Ok(())
}
pub fn interpolate_variables_secrets_into_extra_args(
VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
extra_args: &mut Vec<String>,
global_replacers: &mut HashSet<(String, String)>,
secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
for arg in extra_args {
if arg.is_empty() {
continue;
}
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
arg,
variables,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate global variables into extra arg '{arg}'",
)
})?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate core secrets into extra arg '{arg}'",
)
})?;
secret_replacers.extend(more_replacers);
// set arg with the result
*arg = res;
}
Ok(())
}
pub fn interpolate_variables_secrets_into_container_command(
VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
command: &mut String,
global_replacers: &mut HashSet<(String, String)>,
secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
if command.is_empty() {
return Ok(());
}
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
command,
variables,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate global variables into command '{command}'",
)
})?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate core secrets into command '{command}'",
)
})?;
secret_replacers.extend(more_replacers);
// set command with the result
*command = res;
Ok(())
}
pub fn interpolate_variables_secrets_into_system_command(
VariablesAndSecrets { variables, secrets }: &VariablesAndSecrets,
command: &mut SystemCommand,
global_replacers: &mut HashSet<(String, String)>,
secret_replacers: &mut HashSet<(String, String)>,
) -> anyhow::Result<()> {
if command.command.is_empty() {
return Ok(());
}
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&command.command,
variables,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate global variables into command '{}'",
command.command
)
})?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate core secrets into command '{}'",
command.command
)
})?;
secret_replacers.extend(more_replacers);
// set command with the result
command.command = res;
Ok(())
}
pub fn add_interp_update_log(
update: &mut Update,
global_replacers: &HashSet<(String, String)>,
secret_replacers: &HashSet<(String, String)>,
) {
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
// Only show names of interpolated secrets
if !secret_replacers.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
}
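
Each helper in this module runs its input through two svi passes: global variables first, then core secrets, so secret values are collected separately and never reach the variable log. A standalone sketch of the same two calls (assuming the svi and anyhow crates, as used by core):

use std::collections::HashMap;

use anyhow::Context;

fn interpolate_twice(
  input: &str,
  variables: &HashMap<String, String>,
  secrets: &HashMap<String, String>,
) -> anyhow::Result<String> {
  // first pass - global variables
  let (res, _global_replacers) = svi::interpolate_variables(
    input,
    variables,
    svi::Interpolator::DoubleBrackets,
    false,
  )
  .context("failed to interpolate global variables")?;
  // second pass - core secrets; only the replacer *names* should be logged
  let (res, _secret_replacers) = svi::interpolate_variables(
    &res,
    secrets,
    svi::Interpolator::DoubleBrackets,
    false,
  )
  .context("failed to interpolate core secrets")?;
  Ok(res)
}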

View File

@@ -1,18 +1,18 @@
use std::{collections::HashSet, str::FromStr, time::Duration};
use std::{str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use mongo_indexed::Document;
use monitor_client::{
use komodo_client::{
api::write::CreateServer,
entities::{
monitor_timestamp,
komodo_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::{Log, ResourceTarget, Update},
update::Log,
user::{system_user, User},
EnvironmentVar,
ResourceTarget,
},
};
use mungos::{
@@ -20,7 +20,6 @@ use mungos::{
mongodb::bson::{doc, oid::ObjectId, to_document, Bson},
};
use periphery_client::PeripheryClient;
use query::get_global_variables;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use resolver_api::Resolve;
@@ -36,6 +35,7 @@ pub mod build;
pub mod builder;
pub mod cache;
pub mod channel;
pub mod interpolate;
pub mod procedure;
pub mod prune;
pub mod query;
@@ -227,73 +227,6 @@ pub fn flatten_document(doc: Document) -> Document {
target
}
/// Returns the secret replacers
pub async fn interpolate_variables_secrets_into_environment(
environment: &mut Vec<EnvironmentVar>,
update: &mut Update,
) -> anyhow::Result<HashSet<(String, String)>> {
// Interpolate variables into environment
let variables = get_global_variables().await?;
let core_config = core_config();
let mut global_replacers = HashSet::new();
let mut secret_replacers = HashSet::new();
for env in environment {
// first pass - global variables
let (res, more_replacers) = svi::interpolate_variables(
&env.value,
&variables,
svi::Interpolator::DoubleBrackets,
false,
)
.with_context(|| {
format!(
"failed to interpolate global variables - {}",
env.variable
)
})?;
global_replacers.extend(more_replacers);
// second pass - core secrets
let (res, more_replacers) = svi::interpolate_variables(
&res,
&core_config.secrets,
svi::Interpolator::DoubleBrackets,
false,
)
.context("failed to interpolate core secrets")?;
secret_replacers.extend(more_replacers);
// set env value with the result
env.value = res;
}
// Show which variables were interpolated
if !global_replacers.is_empty() {
update.push_simple_log(
"interpolate global variables",
global_replacers
.into_iter()
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
if !secret_replacers.is_empty() {
update.push_simple_log(
"interpolate core secrets",
secret_replacers
.iter()
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
.collect::<Vec<_>>()
.join("\n"),
);
}
Ok(secret_replacers)
}
pub async fn startup_cleanup() {
tokio::join!(
startup_in_progress_update_cleanup(),
@@ -304,8 +237,8 @@ pub async fn startup_cleanup() {
/// Run on startup, as no updates should be in progress on startup
async fn startup_in_progress_update_cleanup() {
let log = Log::error(
"monitor shutdown",
String::from("Monitor shutdown during execution. If this is a build, the builder may not have been terminated.")
"Komodo shutdown",
String::from("Komodo shutdown during execution. If this is a build, the builder may not have been terminated.")
);
// This static log won't fail to serialize, unwrap ok.
let log = to_document(&log).unwrap();
@@ -373,7 +306,7 @@ async fn startup_open_alert_cleanup() {
doc! { "_id": { "$in": to_update_ids } },
doc! { "$set": {
"resolved": true,
"resolved_ts": monitor_timestamp()
"resolved_ts": komodo_timestamp()
} },
)
.await
@@ -402,10 +335,11 @@ pub async fn ensure_server() {
if server.is_some() {
return;
}
if let Err(e) = State
.resolve(
CreateServer {
name: String::from("default"),
name: format!("server-{}", random_string(5)),
config: PartialServerConfig {
address: Some(ensure_server.to_string()),
enabled: Some(true),

View File

@@ -3,7 +3,7 @@ use std::time::{Duration, Instant};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use monitor_client::{
use komodo_client::{
api::execute::Execution,
entities::{
procedure::Procedure,
@@ -193,6 +193,166 @@ async fn execute_execution(
)
.await?
}
Execution::StartDeployment(req) => {
let req = ExecuteRequest::StartDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StartDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StartDeployment"),
&update_id,
)
.await?
}
Execution::RestartDeployment(req) => {
let req = ExecuteRequest::RestartDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RestartDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RestartDeployment"),
&update_id,
)
.await?
}
Execution::PauseDeployment(req) => {
let req = ExecuteRequest::PauseDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PauseDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PauseDeployment"),
&update_id,
)
.await?
}
Execution::UnpauseDeployment(req) => {
let req = ExecuteRequest::UnpauseDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::UnpauseDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at UnpauseDeployment"),
&update_id,
)
.await?
}
Execution::StopDeployment(req) => {
let req = ExecuteRequest::StopDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StopDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StopDeployment"),
&update_id,
)
.await?
}
Execution::DestroyDeployment(req) => {
let req = ExecuteRequest::DestroyDeployment(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DestroyDeployment(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RemoveDeployment"),
&update_id,
)
.await?
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CloneRepo(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo"),
&update_id,
)
.await?
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo"),
&update_id,
)
.await?
}
Execution::BuildRepo(req) => {
let req = ExecuteRequest::BuildRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::BuildRepo(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at BuildRepo"),
&update_id,
)
.await?
}
Execution::CancelRepoBuild(req) => {
let req = ExecuteRequest::CancelRepoBuild(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CancelRepoBuild(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CancelRepoBuild"),
&update_id,
)
.await?
}
Execution::StartContainer(req) => {
let req = ExecuteRequest::StartContainer(req);
let update = init_execution_update(&req, &user).await?;
@@ -273,6 +433,86 @@ async fn execute_execution(
)
.await?
}
Execution::DestroyContainer(req) => {
let req = ExecuteRequest::DestroyContainer(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DestroyContainer(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer"),
&update_id,
)
.await?
}
Execution::StartAllContainers(req) => {
let req = ExecuteRequest::StartAllContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::StartAllContainers(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at StartAllContainers"),
&update_id,
)
.await?
}
Execution::RestartAllContainers(req) => {
let req = ExecuteRequest::RestartAllContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RestartAllContainers(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at RestartAllContainers"),
&update_id,
)
.await?
}
Execution::PauseAllContainers(req) => {
let req = ExecuteRequest::PauseAllContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PauseAllContainers(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PauseAllContainers"),
&update_id,
)
.await?
}
Execution::UnpauseAllContainers(req) => {
let req = ExecuteRequest::UnpauseAllContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::UnpauseAllContainers(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at UnpauseAllContainers"),
&update_id,
)
.await?
}
Execution::StopAllContainers(req) => {
let req = ExecuteRequest::StopAllContainers(req);
let update = init_execution_update(&req, &user).await?;
@@ -289,10 +529,10 @@ async fn execute_execution(
)
.await?
}
Execution::RemoveContainer(req) => {
let req = ExecuteRequest::RemoveContainer(req);
Execution::PruneContainers(req) => {
let req = ExecuteRequest::PruneContainers(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::RemoveContainer(req) = req else {
let ExecuteRequest::PruneContainers(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
@@ -300,15 +540,15 @@ async fn execute_execution(
State
.resolve(req, (user, update))
.await
.context("failed at RemoveContainer"),
.context("failed at PruneContainers"),
&update_id,
)
.await?
}
Execution::CloneRepo(req) => {
let req = ExecuteRequest::CloneRepo(req);
Execution::DeleteNetwork(req) => {
let req = ExecuteRequest::DeleteNetwork(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CloneRepo(req) = req else {
let ExecuteRequest::DeleteNetwork(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
@@ -316,55 +556,7 @@ async fn execute_execution(
State
.resolve(req, (user, update))
.await
.context("failed at CloneRepo"),
&update_id,
)
.await?
}
Execution::PullRepo(req) => {
let req = ExecuteRequest::PullRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PullRepo(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PullRepo"),
&update_id,
)
.await?
}
Execution::BuildRepo(req) => {
let req = ExecuteRequest::BuildRepo(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::BuildRepo(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at BuildRepo"),
&update_id,
)
.await?
}
Execution::CancelRepoBuild(req) => {
let req = ExecuteRequest::CancelRepoBuild(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::CancelRepoBuild(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at CancelRepoBuild"),
.context("failed at DeleteNetwork"),
&update_id,
)
.await?
@@ -385,6 +577,22 @@ async fn execute_execution(
)
.await?
}
Execution::DeleteImage(req) => {
let req = ExecuteRequest::DeleteImage(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::DeleteImage(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at DeleteImage"),
&update_id,
)
.await?
}
Execution::PruneImages(req) => {
let req = ExecuteRequest::PruneImages(req);
let update = init_execution_update(&req, &user).await?;
@@ -401,10 +609,10 @@ async fn execute_execution(
)
.await?
}
Execution::PruneContainers(req) => {
let req = ExecuteRequest::PruneContainers(req);
Execution::DeleteVolume(req) => {
let req = ExecuteRequest::DeleteVolume(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PruneContainers(req) = req else {
let ExecuteRequest::DeleteVolume(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
@@ -412,7 +620,39 @@ async fn execute_execution(
State
.resolve(req, (user, update))
.await
.context("failed at PruneContainers"),
.context("failed at DeleteVolume"),
&update_id,
)
.await?
}
Execution::PruneVolumes(req) => {
let req = ExecuteRequest::PruneVolumes(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PruneVolumes(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneVolumes"),
&update_id,
)
.await?
}
Execution::PruneSystem(req) => {
let req = ExecuteRequest::PruneSystem(req);
let update = init_execution_update(&req, &user).await?;
let ExecuteRequest::PruneSystem(req) = req else {
unreachable!()
};
let update_id = update.id.clone();
handle_resolve_result(
State
.resolve(req, (user, update))
.await
.context("failed at PruneSystem"),
&update_id,
)
.await?
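
Every arm above repeats the same five steps: wrap the request in ExecuteRequest, create the update, unwrap the request back out, resolve, and tag the error context with the variant name. A hypothetical macro (not part of this commit) makes the shape explicit:

macro_rules! execute_arm {
  ($variant:ident, $req:expr, $user:expr) => {{
    let req = ExecuteRequest::$variant($req);
    let update = init_execution_update(&req, &$user).await?;
    let ExecuteRequest::$variant(req) = req else {
      unreachable!()
    };
    let update_id = update.id.clone();
    handle_resolve_result(
      State
        .resolve(req, ($user, update))
        .await
        .context(concat!("failed at ", stringify!($variant))),
      &update_id,
    )
    .await?
  }};
}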

View File

@@ -4,7 +4,7 @@ use async_timing_util::{
};
use futures::future::join_all;
use mungos::{find::find_collect, mongodb::bson::doc};
use periphery_client::api::build::PruneImages;
use periphery_client::api::image::PruneImages;
use crate::{config::core_config, state::db_client};

View File

@@ -1,11 +1,12 @@
use std::{collections::HashMap, str::FromStr};
use anyhow::{anyhow, Context};
use monitor_client::entities::{
use komodo_client::entities::{
alerter::Alerter,
build::Build,
builder::Builder,
deployment::{ContainerSummary, Deployment, DeploymentState},
deployment::{Deployment, DeploymentState},
docker::container::{ContainerListItem, ContainerStateStatusEnum},
permission::PermissionLevel,
procedure::Procedure,
repo::Repo,
@@ -14,11 +15,11 @@ use monitor_client::entities::{
stack::{Stack, StackServiceNames, StackState},
sync::ResourceSync,
tag::Tag,
update::{ResourceTarget, ResourceTargetVariant, Update},
update::Update,
user::{admin_service_user, User},
user_group::UserGroup,
variable::Variable,
Operation,
Operation, ResourceTarget, ResourceTargetVariant,
};
use mungos::{
find::find_collect,
@@ -29,17 +30,15 @@ use mungos::{
};
use crate::{
config::core_config,
resource::{self, get_user_permission_on_resource},
state::db_client,
state::{db_client, deployment_status_cache, stack_status_cache},
};
use super::stack::{
compose_container_match_regex,
services::extract_services_from_stack,
};
use super::stack::compose_container_match_regex;
#[instrument(level = "debug")]
// user: Id or username
#[instrument(level = "debug")]
pub async fn get_user(user: &str) -> anyhow::Result<User> {
if let Some(user) = admin_service_user(user) {
return Ok(user);
@@ -54,46 +53,40 @@ pub async fn get_user(user: &str) -> anyhow::Result<User> {
}
#[instrument(level = "debug")]
pub async fn get_server_with_status(
pub async fn get_server_with_state(
server_id_or_name: &str,
) -> anyhow::Result<(Server, ServerState)> {
let server = resource::get::<Server>(server_id_or_name).await?;
let state = get_server_state(&server).await;
Ok((server, state))
}
#[instrument(level = "debug")]
pub async fn get_server_state(server: &Server) -> ServerState {
if !server.config.enabled {
return Ok((server, ServerState::Disabled));
return ServerState::Disabled;
}
let status = match super::periphery_client(&server)?
// Unwrap ok: Server disabled check above
match super::periphery_client(server)
.unwrap()
.request(periphery_client::api::GetHealth {})
.await
{
Ok(_) => ServerState::Ok,
Err(_) => ServerState::NotOk,
};
Ok((server, status))
}
}
#[instrument(level = "debug")]
pub async fn get_deployment_state(
deployment: &Deployment,
) -> anyhow::Result<DeploymentState> {
if deployment.config.server_id.is_empty() {
return Ok(DeploymentState::NotDeployed);
}
let (server, status) =
get_server_with_status(&deployment.config.server_id).await?;
if status != ServerState::Ok {
return Ok(DeploymentState::Unknown);
}
let container = super::periphery_client(&server)?
.request(periphery_client::api::container::GetContainerList {})
.await?
.into_iter()
.find(|container| container.name == deployment.name);
let state = match container {
Some(container) => container.state,
None => DeploymentState::NotDeployed,
};
let state = deployment_status_cache()
.get(&deployment.id)
.await
.unwrap_or_default()
.curr
.state;
Ok(state)
}
@@ -101,7 +94,7 @@ pub async fn get_deployment_state(
pub fn get_stack_state_from_containers(
ignore_services: &[String],
services: &[StackServiceNames],
containers: &[ContainerSummary],
containers: &[ContainerListItem],
) -> StackState {
// first filter the containers to only ones which match the service
let services = services
@@ -129,39 +122,39 @@ pub fn get_stack_state_from_containers(
if services.len() != containers.len() {
return StackState::Unhealthy;
}
let running = containers
.iter()
.all(|container| container.state == DeploymentState::Running);
let running = containers.iter().all(|container| {
container.state == ContainerStateStatusEnum::Running
});
if running {
return StackState::Running;
}
let paused = containers
.iter()
.all(|container| container.state == DeploymentState::Paused);
let paused = containers.iter().all(|container| {
container.state == ContainerStateStatusEnum::Paused
});
if paused {
return StackState::Paused;
}
let stopped = containers
.iter()
.all(|container| container.state == DeploymentState::Exited);
let stopped = containers.iter().all(|container| {
container.state == ContainerStateStatusEnum::Exited
});
if stopped {
return StackState::Stopped;
}
let restarting = containers
.iter()
.all(|container| container.state == DeploymentState::Restarting);
let restarting = containers.iter().all(|container| {
container.state == ContainerStateStatusEnum::Restarting
});
if restarting {
return StackState::Restarting;
}
let dead = containers
.iter()
.all(|container| container.state == DeploymentState::Dead);
let dead = containers.iter().all(|container| {
container.state == ContainerStateStatusEnum::Dead
});
if dead {
return StackState::Dead;
}
let removing = containers
.iter()
.all(|container| container.state == DeploymentState::Removing);
let removing = containers.iter().all(|container| {
container.state == ContainerStateStatusEnum::Removing
});
if removing {
return StackState::Removing;
}
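
Each block above asks the same question for a different ContainerStateStatusEnum variant. A small helper capturing the check (hypothetical; the commit keeps the explicit cascade, and the types are the komodo_client imports at the top of this file):

fn all_in_state(
  containers: &[ContainerListItem],
  state: ContainerStateStatusEnum,
) -> bool {
  containers
    .iter()
    .all(|container| container.state == state)
}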
@@ -175,22 +168,13 @@ pub async fn get_stack_state(
if stack.config.server_id.is_empty() {
return Ok(StackState::Down);
}
let (server, status) =
get_server_with_status(&stack.config.server_id).await?;
if status != ServerState::Ok {
return Ok(StackState::Unknown);
}
let containers = super::periphery_client(&server)?
.request(periphery_client::api::container::GetContainerList {})
.await?;
let services = extract_services_from_stack(stack, false).await?;
Ok(get_stack_state_from_containers(
&stack.config.ignore_services,
&services,
&containers,
))
let state = stack_status_cache()
.get(&stack.id)
.await
.unwrap_or_default()
.curr
.state;
Ok(state)
}
#[instrument(level = "debug")]
@@ -329,18 +313,6 @@ pub fn id_or_username_filter(id_or_username: &str) -> Document {
}
}
pub async fn get_global_variables(
) -> anyhow::Result<HashMap<String, String>> {
Ok(
find_collect(&db_client().await.variables, None, None)
.await
.context("failed to get all variables from db")?
.into_iter()
.map(|variable| (variable.name, variable.value))
.collect(),
)
}
pub async fn get_variable(name: &str) -> anyhow::Result<Variable> {
db_client()
.await
@@ -374,3 +346,33 @@ pub async fn get_latest_update(
.await
.context("failed to query db for latest update")
}
pub struct VariablesAndSecrets {
pub variables: HashMap<String, String>,
pub secrets: HashMap<String, String>,
}
pub async fn get_variables_and_secrets(
) -> anyhow::Result<VariablesAndSecrets> {
let variables =
find_collect(&db_client().await.variables, None, None)
.await
.context("failed to get all variables from db")?;
let mut secrets = core_config().secrets.clone();
// extend secrets with secret variables
secrets.extend(
variables.iter().filter(|variable| variable.is_secret).map(
|variable| (variable.name.clone(), variable.value.clone()),
),
);
// collect non secret variables
let variables = variables
.into_iter()
.filter(|variable| !variable.is_secret)
.map(|variable| (variable.name, variable.value))
.collect();
Ok(VariablesAndSecrets { variables, secrets })
}
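
get_variables_and_secrets is what feeds the new interpolate module: secret variables are folded into the core secrets map, so their values stay out of update logs, while plain variables remain separately visible. The partition, isolated with a stand-in Variable struct (the real type lives in komodo_client):

use std::collections::HashMap;

struct Variable {
  name: String,
  value: String,
  is_secret: bool,
}

fn partition(
  variables: Vec<Variable>,
  mut secrets: HashMap<String, String>,
) -> (HashMap<String, String>, HashMap<String, String>) {
  // secret variables are treated as secrets from here on
  secrets.extend(
    variables
      .iter()
      .filter(|variable| variable.is_secret)
      .map(|variable| {
        (variable.name.clone(), variable.value.clone())
      }),
  );
  // only non secret variables may appear in update logs
  let variables = variables
    .into_iter()
    .filter(|variable| !variable.is_secret)
    .map(|variable| (variable.name, variable.value))
    .collect();
  (variables, secrets)
}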

View File

@@ -1,5 +1,5 @@
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
use komodo_client::{
api::write::RefreshRepoCache, entities::user::repo_user,
};
use mungos::find::find_collect;

View File

@@ -1,4 +1,4 @@
use monitor_client::{
use komodo_client::{
api::execute::*,
entities::{
permission::PermissionLevel,

View File

@@ -1,6 +1,6 @@
use anyhow::{anyhow, Context};
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
use komodo_client::{
api::write::RefreshStackCache,
entities::{
permission::PermissionLevel,
@@ -19,7 +19,7 @@ use crate::{
state::{db_client, State},
};
use super::query::get_server_with_status;
use super::query::get_server_with_state;
pub mod execute;
pub mod remote;
@@ -81,7 +81,7 @@ pub async fn get_stack_and_server(
}
let (server, status) =
get_server_with_status(&stack.config.server_id).await?;
get_server_with_state(&stack.config.server_id).await?;
if block_if_server_unreachable && status != ServerState::Ok {
return Err(anyhow!(
"cannot send action when server is unreachable or disabled"

View File

@@ -5,7 +5,7 @@ use std::{
use anyhow::{anyhow, Context};
use formatting::format_serror;
use monitor_client::entities::{
use komodo_client::entities::{
stack::{ComposeContents, Stack},
update::Log,
CloneArgs,
@@ -118,6 +118,7 @@ pub async fn clone_remote_repo(
&[],
"",
None,
&[],
)
.await
.context("failed to clone stack repo")

View File

@@ -1,5 +1,5 @@
use anyhow::Context;
use monitor_client::entities::stack::{
use komodo_client::entities::stack::{
ComposeContents, ComposeFile, ComposeService, Stack,
StackServiceNames,
};

View File

@@ -3,7 +3,7 @@ use std::{collections::HashMap, time::Duration};
use anyhow::{anyhow, Context};
use formatting::{bold, colored, format_serror, muted, Color};
use futures::future::join_all;
use monitor_client::{
use komodo_client::{
api::{
execute::{Deploy, DeployStack},
read::ListBuildVersions,
@@ -16,8 +16,9 @@ use monitor_client::{
stack::{PartialStackConfig, Stack, StackConfig, StackState},
sync::SyncDeployUpdate,
toml::ResourceToml,
update::{Log, ResourceTarget},
update::Log,
user::sync_user,
ResourceTarget,
},
};
use resolver_api::Resolve;
@@ -55,6 +56,9 @@ pub async fn deploy_from_cache(
mut to_deploy: ToDeployCache,
logs: &mut Vec<Log>,
) {
if to_deploy.is_empty() {
return;
}
let mut log = format!(
"{}: running executions to sync deployment / stack state",
muted("INFO")
@@ -409,7 +413,7 @@ fn build_cache_for_deployment<'a>(
let deployed_version = status
.container
.as_ref()
.and_then(|c| c.image.split(':').last())
.and_then(|c| c.image.as_ref()?.split(':').last())
.unwrap_or("0.0.0");
match build_version_cache.get(build_id) {
Some(version) if deployed_version != version => {
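
ContainerListItem now exposes image as an Option, so the version extraction threads the Option with `?` inside the closure. The same logic isolated as a hypothetical helper:

fn deployed_version(image: Option<&str>) -> &str {
  image
    .and_then(|image| image.split(':').last())
    .unwrap_or("0.0.0")
}

// deployed_version(Some("ghcr.io/org/app:1.14.0")) == "1.14.0"
// deployed_version(None) == "0.0.0"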

View File

@@ -2,7 +2,7 @@ use std::{fs, path::Path};
use anyhow::{anyhow, Context};
use formatting::{colored, muted, Color};
use monitor_client::entities::{toml::ResourcesToml, update::Log};
use komodo_client::entities::{toml::ResourcesToml, update::Log};
use serde::de::DeserializeOwned;
pub fn read_resources(

View File

@@ -1,5 +1,5 @@
use async_timing_util::{wait_until_timelength, Timelength};
use monitor_client::{
use komodo_client::{
api::write::RefreshResourceSyncPending, entities::user::sync_user,
};
use mungos::find::find_collect;

View File

@@ -1,7 +1,7 @@
use std::fs;
use anyhow::{anyhow, Context};
use monitor_client::entities::{
use komodo_client::entities::{
sync::ResourceSync, toml::ResourcesToml, update::Log, CloneArgs,
};
@@ -66,6 +66,7 @@ pub async fn get_remote_resources(
&[],
"",
None,
&[],
)
.await
.context("failed to clone resource repo")?;

View File

@@ -2,31 +2,21 @@ use std::collections::HashMap;
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
use komodo_client::{
api::write::{UpdateDescription, UpdateTagsOnResource},
entities::{
self,
alerter::Alerter,
build::Build,
builder::Builder,
deployment::Deployment,
procedure::Procedure,
repo::Repo,
server::Server,
server_template::ServerTemplate,
stack::Stack,
sync::SyncUpdate,
tag::Tag,
toml::ResourceToml,
update::{Log, ResourceTarget},
user::sync_user,
self, alerter::Alerter, build::Build, builder::Builder,
deployment::Deployment, procedure::Procedure, repo::Repo,
server::Server, server_template::ServerTemplate, stack::Stack,
sync::SyncUpdate, tag::Tag, toml::ResourceToml, update::Log,
user::sync_user, ResourceTarget,
},
};
use mungos::find::find_collect;
use partial_derive2::{Diff, FieldDiff, MaybeNone};
use resolver_api::Resolve;
use crate::{resource::MonitorResource, state::State};
use crate::{resource::KomodoResource, state::State};
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
pub type ToCreate<T> = Vec<ResourceToml<T>>;
@@ -42,7 +32,7 @@ pub struct ToUpdateItem<T: Default> {
pub update_tags: bool,
}
pub trait ResourceSync: MonitorResource + Sized {
pub trait ResourceSync: KomodoResource + Sized {
fn resource_target(id: String) -> ResourceTarget;
/// Apply any changes to incoming toml partial config

View File

@@ -1,5 +1,5 @@
use formatting::{bold, colored, muted, Color};
use monitor_client::{
use komodo_client::{
api::execute::Execution,
entities::{
self,
@@ -12,8 +12,9 @@ use monitor_client::{
server::Server,
server_template::ServerTemplate,
stack::Stack,
update::{Log, ResourceTarget},
update::Log,
user::sync_user,
ResourceTarget,
},
};
use partial_derive2::{MaybeNone, PartialDiff};
@@ -23,7 +24,7 @@ use crate::{
run_update_description, run_update_tags, ResourceSync,
ToUpdateItem,
},
resource::MonitorResource,
resource::KomodoResource,
};
use super::resource::{
@@ -266,42 +267,42 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartContainer(config) => {
Execution::StartDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RestartContainer(config) => {
Execution::RestartDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PauseContainer(config) => {
Execution::PauseDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::UnpauseContainer(config) => {
Execution::UnpauseDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
Execution::StopDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RemoveContainer(config) => {
Execution::DestroyDeployment(config) => {
config.deployment = resources
.deployments
.get(&config.deployment)
@@ -336,6 +337,76 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartContainer(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RestartContainer(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PauseContainer(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::UnpauseContainer(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopContainer(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::DestroyContainer(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StartAllContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::RestartAllContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PauseAllContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::UnpauseAllContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::StopAllContainers(config) => {
config.server = resources
.servers
@@ -343,6 +414,20 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneContainers(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::DeleteNetwork(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneNetworks(config) => {
config.server = resources
.servers
@@ -350,6 +435,13 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::DeleteImage(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneImages(config) => {
config.server = resources
.servers
@@ -357,7 +449,21 @@ impl ResourceSync for Procedure {
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneContainers(config) => {
Execution::DeleteVolume(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneVolumes(config) => {
config.server = resources
.servers
.get(&config.server)
.map(|d| d.name.clone())
.unwrap_or_default();
}
Execution::PruneSystem(config) => {
config.server = resources
.servers
.get(&config.server)

View File

@@ -2,7 +2,7 @@ use std::{cmp::Ordering, collections::HashMap};
use anyhow::Context;
use formatting::{bold, colored, muted, Color};
use monitor_client::{
use komodo_client::{
api::{
read::ListUserTargetPermissions,
write::{
@@ -14,8 +14,9 @@ use monitor_client::{
permission::{PermissionLevel, UserTarget},
sync::SyncUpdate,
toml::{PermissionToml, UserGroupToml},
update::{Log, ResourceTarget, ResourceTargetVariant},
update::Log,
user::sync_user,
ResourceTarget, ResourceTargetVariant,
},
};
use mungos::find::find_collect;

Some files were not shown because too many files have changed in this diff.