forked from github-starred/komodo
Compare commits
175 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e029e94f0d | ||
|
|
3be2b5163b | ||
|
|
6a145f58ff | ||
|
|
f1cede2ebd | ||
|
|
a5cfa1d412 | ||
|
|
a0674654c1 | ||
|
|
3faa1c58c1 | ||
|
|
7e296f34af | ||
|
|
9f8ced190c | ||
|
|
c194bb16d8 | ||
|
|
39fec9b55e | ||
|
|
e97ed9888d | ||
|
|
559102ffe3 | ||
|
|
6bf80ddcc7 | ||
|
|
89dbe1b4d9 | ||
|
|
334e16d646 | ||
|
|
a7bbe519f4 | ||
|
|
5827486c5a | ||
|
|
8ca8f7eddd | ||
|
|
0600276b43 | ||
|
|
a77a1495c7 | ||
|
|
021ed5d15f | ||
|
|
7d4376f426 | ||
|
|
7e9b406a34 | ||
|
|
dcf78b05b3 | ||
|
|
3236302d05 | ||
|
|
fc41258d6c | ||
|
|
ae8df90361 | ||
|
|
7d05b2677f | ||
|
|
2f55468a4c | ||
|
|
a20bd2c23f | ||
|
|
b3aa0ffa78 | ||
|
|
8e58a283cd | ||
|
|
9b2d9932ef | ||
|
|
7cb093ade1 | ||
|
|
e2f73d8474 | ||
|
|
12abd5a5bd | ||
|
|
f349cdf50d | ||
|
|
796bcac952 | ||
|
|
fed05684aa | ||
|
|
80a91584a8 | ||
|
|
12d05e9a25 | ||
|
|
f4d06c91ff | ||
|
|
5d7449529f | ||
|
|
a0021d1785 | ||
|
|
bbd23e3f5f | ||
|
|
71841a8e41 | ||
|
|
5228ffd9b8 | ||
|
|
a06f506e54 | ||
|
|
71d6a55e50 | ||
|
|
d16c03dd2a | ||
|
|
6abd9a6554 | ||
|
|
5f04e881a5 | ||
|
|
5fc0a87dea | ||
|
|
2463ed3879 | ||
|
|
a2758ce6f4 | ||
|
|
3f1788dbbb | ||
|
|
33a0560af6 | ||
|
|
610a10c488 | ||
|
|
39b217687d | ||
|
|
2f73461979 | ||
|
|
aae9bb9e51 | ||
|
|
7d011d93fa | ||
|
|
bffdea4357 | ||
|
|
790566bf79 | ||
|
|
b17db93f13 | ||
|
|
daa2ea9361 | ||
|
|
176fb04707 | ||
|
|
5ba1254cdb | ||
|
|
43593162b0 | ||
|
|
418f359492 | ||
|
|
3cded60166 | ||
|
|
6f70f9acb0 | ||
|
|
6e1064e58e | ||
|
|
d96e5b4c46 | ||
|
|
5a8822c7d2 | ||
|
|
1f2d236228 | ||
|
|
a89bd4a36d | ||
|
|
0b40dff72b | ||
|
|
59874f0a92 | ||
|
|
14e459b32e | ||
|
|
f6c55b7be1 | ||
|
|
460819a145 | ||
|
|
91f4df8ac2 | ||
|
|
6a19e18539 | ||
|
|
30c5fa3569 | ||
|
|
4b6aa1d73d | ||
|
|
5dfd007580 | ||
|
|
955670d979 | ||
|
|
f70e359f14 | ||
|
|
a2b0981f76 | ||
|
|
49a8e581bf | ||
|
|
2d0c1724db | ||
|
|
20ae1c22d7 | ||
|
|
e8d75b2a3d | ||
|
|
e23d68f86a | ||
|
|
2111976450 | ||
|
|
8a0109522b | ||
|
|
8d75fa3f2f | ||
|
|
197e938346 | ||
|
|
6ba0184551 | ||
|
|
c456b67018 | ||
|
|
02e152af4d | ||
|
|
392e691f92 | ||
|
|
495e208ccd | ||
|
|
14474adb90 | ||
|
|
896784e2e3 | ||
|
|
2e690bce24 | ||
|
|
7172d24512 | ||
|
|
b754c89118 | ||
|
|
31a23dfe2d | ||
|
|
b0f80cafc3 | ||
|
|
85a16f6c6f | ||
|
|
29a7e4c27b | ||
|
|
a73b572725 | ||
|
|
aa44bf04e8 | ||
|
|
93348621c5 | ||
|
|
4b2139ede2 | ||
|
|
3251216be7 | ||
|
|
1f980a45e8 | ||
|
|
94da1dce99 | ||
|
|
d4fc015494 | ||
|
|
5800fc91d2 | ||
|
|
91785e1e8f | ||
|
|
41fccdb16e | ||
|
|
78cf93da8a | ||
|
|
ea36549dbe | ||
|
|
a319095869 | ||
|
|
a6d7a80cbc | ||
|
|
20f051c890 | ||
|
|
2fef954ad5 | ||
|
|
e1b9367ee3 | ||
|
|
c7717fbfdf | ||
|
|
bf918042c3 | ||
|
|
46ac16100d | ||
|
|
eca0378c56 | ||
|
|
bfd5c5390d | ||
|
|
db41878278 | ||
|
|
26468ed8ea | ||
|
|
707751708d | ||
|
|
d28d3422a3 | ||
|
|
9e2b1ede93 | ||
|
|
37e37deb04 | ||
|
|
e73a6ca72c | ||
|
|
6082b7b1bd | ||
|
|
678767c24b | ||
|
|
59cb86d599 | ||
|
|
5f0a9ad652 | ||
|
|
fc758121da | ||
|
|
95ccf1af0b | ||
|
|
627f7ab585 | ||
|
|
4238abf61a | ||
|
|
66bfe69983 | ||
|
|
42b493ae10 | ||
|
|
f4d6c50b67 | ||
|
|
17176a7d56 | ||
|
|
140b95b70c | ||
|
|
3a2cb73088 | ||
|
|
4585533bc5 | ||
|
|
83099f03a1 | ||
|
|
9e619c0250 | ||
|
|
edf49dc685 | ||
|
|
beffc8c159 | ||
|
|
d99cf87da0 | ||
|
|
8e19eb7b0f | ||
|
|
78a0b56c73 | ||
|
|
bf5dc52237 | ||
|
|
482ea59d4c | ||
|
|
7740d36f49 | ||
|
|
820754deda | ||
|
|
4219884198 | ||
|
|
d9e24cc35a | ||
|
|
8d2ce884d9 | ||
|
|
313b000e64 | ||
|
|
c2f9e29605 |
2
.cargo/config.toml
Normal file
2
.cargo/config.toml
Normal file
@@ -0,0 +1,2 @@
|
||||
[build]
|
||||
rustflags = ["-Wunused-crate-dependencies"]
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -6,4 +6,6 @@ dist
|
||||
.env
|
||||
.env.development
|
||||
creds.toml
|
||||
core.config.toml
|
||||
.syncs
|
||||
.stacks
|
||||
.DS_Store
|
||||
93
.vscode/tasks.json
vendored
93
.vscode/tasks.json
vendored
@@ -1,93 +0,0 @@
|
||||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "build",
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
},
|
||||
"label": "rust: cargo build"
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "fmt",
|
||||
"label": "rust: cargo fmt"
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "check",
|
||||
"label": "rust: cargo check"
|
||||
},
|
||||
{
|
||||
"label": "start dev",
|
||||
"dependsOn": [
|
||||
"run core",
|
||||
"start frontend"
|
||||
],
|
||||
"problemMatcher": []
|
||||
},
|
||||
{
|
||||
"type": "shell",
|
||||
"command": "yarn start",
|
||||
"label": "start frontend",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/frontend"
|
||||
},
|
||||
"presentation": {
|
||||
"group": "start"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "run",
|
||||
"label": "run core",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/bin/core"
|
||||
},
|
||||
"presentation": {
|
||||
"group": "start"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "run",
|
||||
"label": "run periphery",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/bin/periphery"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "run",
|
||||
"label": "run tests",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/bin/tests"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "publish",
|
||||
"args": ["--allow-dirty"],
|
||||
"label": "publish types",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/lib/types"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "publish",
|
||||
"label": "publish rs client",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/lib/rs_client"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "shell",
|
||||
"command": "node ./client/ts/generate_types.mjs",
|
||||
"label": "generate typescript types",
|
||||
"problemMatcher": []
|
||||
}
|
||||
]
|
||||
}
|
||||
2281
Cargo.lock
generated
2281
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
85
Cargo.toml
85
Cargo.toml
@@ -3,20 +3,22 @@ resolver = "2"
|
||||
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.11.0"
|
||||
version = "1.15.2"
|
||||
edition = "2021"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/mbecker20/monitor"
|
||||
homepage = "https://docs.monitor.mogh.tech"
|
||||
repository = "https://github.com/mbecker20/komodo"
|
||||
homepage = "https://komo.do"
|
||||
|
||||
[patch.crates-io]
|
||||
monitor_client = { path = "client/core/rs" }
|
||||
# komodo_client = { path = "client/core/rs" }
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
monitor_client = "1.11.0"
|
||||
# komodo_client = "1.14.3"
|
||||
komodo_client = { path = "client/core/rs" }
|
||||
periphery_client = { path = "client/periphery/rs" }
|
||||
environment_file = { path = "lib/environment_file" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
command = { path = "lib/command" }
|
||||
logger = { path = "lib/logger" }
|
||||
@@ -24,61 +26,63 @@ git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.4.3", default-features = false }
|
||||
slack = { version = "0.1.0", package = "slack_client_rs" }
|
||||
serror = { version = "0.4.6", default-features = false }
|
||||
slack = { version = "0.2.0", package = "slack_client_rs" }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
merge_config_files = "0.1.5"
|
||||
async_timing_util = "1.0.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "1.0.0"
|
||||
mongo_indexed = "2.0.1"
|
||||
resolver_api = "1.1.1"
|
||||
toml_pretty = "1.1.2"
|
||||
parse_csl = "0.1.0"
|
||||
mungos = "1.0.0"
|
||||
mungos = "1.1.0"
|
||||
svi = "1.0.1"
|
||||
|
||||
# ASYNC
|
||||
tokio = { version = "1.38.0", features = ["full"] }
|
||||
reqwest = { version = "0.12.5", features = ["json"] }
|
||||
tokio-util = "0.7.11"
|
||||
reqwest = { version = "0.12.8", features = ["json"] }
|
||||
tokio = { version = "1.38.1", features = ["full"] }
|
||||
tokio-util = "0.7.12"
|
||||
futures = "0.3.30"
|
||||
futures-util = "0.3.30"
|
||||
|
||||
# SERVER
|
||||
axum = { version = "0.7.5", features = ["ws", "json"] }
|
||||
axum-extra = { version = "0.9.3", features = ["typed-header"] }
|
||||
tower = { version = "0.4.13", features = ["timeout"] }
|
||||
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
|
||||
tokio-tungstenite = "0.23.1"
|
||||
axum-extra = { version = "0.9.4", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.1", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.1", features = ["tls-openssl"] }
|
||||
axum = { version = "0.7.7", features = ["ws", "json"] }
|
||||
tokio-tungstenite = "0.24.0"
|
||||
|
||||
# SER/DE
|
||||
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
strum = { version = "0.26.2", features = ["derive"] }
|
||||
serde_json = "1.0.118"
|
||||
toml = "0.8.14"
|
||||
serde = { version = "1.0.210", features = ["derive"] }
|
||||
strum = { version = "0.26.3", features = ["derive"] }
|
||||
serde_json = "1.0.128"
|
||||
serde_yaml = "0.9.34"
|
||||
toml = "0.8.19"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.86"
|
||||
thiserror = "1.0.61"
|
||||
anyhow = "1.0.89"
|
||||
thiserror = "1.0.64"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
|
||||
opentelemetry_sdk = { version = "0.25.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.18", features = ["json"] }
|
||||
tracing-opentelemetry = "0.24.0"
|
||||
opentelemetry-otlp = "0.16.0"
|
||||
opentelemetry = "0.23.0"
|
||||
opentelemetry-semantic-conventions = "0.25.0"
|
||||
tracing-opentelemetry = "0.26.0"
|
||||
opentelemetry-otlp = "0.25.0"
|
||||
opentelemetry = "0.25.0"
|
||||
tracing = "0.1.40"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.7", features = ["derive"] }
|
||||
dotenv = "0.15.0"
|
||||
clap = { version = "4.5.19", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO
|
||||
uuid = { version = "1.9.1", features = ["v4", "fast-rng", "serde"] }
|
||||
# CRYPTO / AUTH
|
||||
uuid = { version = "1.10.0", features = ["v4", "fast-rng", "serde"] }
|
||||
openidconnect = "3.5.0"
|
||||
urlencoding = "2.1.3"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.15.1"
|
||||
@@ -90,19 +94,18 @@ jwt = "0.16.0"
|
||||
hex = "0.4.3"
|
||||
|
||||
# SYSTEM
|
||||
bollard = "0.16.1"
|
||||
sysinfo = "0.30.12"
|
||||
bollard = "0.17.1"
|
||||
sysinfo = "0.31.4"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.5.3"
|
||||
aws-sdk-ec2 = "1.53.0"
|
||||
aws-sdk-ecr = "1.33.0"
|
||||
aws-config = "1.5.7"
|
||||
aws-sdk-ec2 = "1.75.0"
|
||||
|
||||
# MISC
|
||||
derive_builder = "0.20.0"
|
||||
derive_builder = "0.20.1"
|
||||
typeshare = "1.0.3"
|
||||
octorust = "0.7.0"
|
||||
dashmap = "6.1.0"
|
||||
colored = "2.1.0"
|
||||
regex = "1.10.5"
|
||||
bson = "2.11.0"
|
||||
|
||||
regex = "1.11.0"
|
||||
bson = "2.13.0"
|
||||
@@ -11,7 +11,7 @@ repository.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client.workspace = true
|
||||
komodo_client.workspace = true
|
||||
logger.workspace = true
|
||||
# external
|
||||
tokio.workspace = true
|
||||
@@ -19,5 +19,5 @@ tracing.workspace = true
|
||||
axum.workspace = true
|
||||
anyhow.workspace = true
|
||||
serde.workspace = true
|
||||
dotenv.workspace = true
|
||||
dotenvy.workspace = true
|
||||
envy.workspace = true
|
||||
@@ -1,11 +1,11 @@
|
||||
FROM rust:1.71.1 as builder
|
||||
FROM rust:1.80.1 as builder
|
||||
WORKDIR /builder
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN cargo build -p alert_logger --release
|
||||
|
||||
FROM gcr.io/distroless/cc
|
||||
FROM gcr.io/distroless/debian-cc
|
||||
|
||||
COPY --from=builder /builder/target/release/alert_logger /
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Alerter
|
||||
|
||||
This crate sets up a basic axum server that listens for incoming alert POSTs.
|
||||
It can be used as a monitor alerting endpoint, and serves as a template for other custom alerter implementations.
|
||||
It can be used as a Komodo alerting endpoint, and serves as a template for other custom alerter implementations.
|
||||
@@ -5,9 +5,7 @@ use std::{net::SocketAddr, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{routing::post, Json, Router};
|
||||
use monitor_client::entities::{
|
||||
alert::Alert, server::stats::SeverityLevel,
|
||||
};
|
||||
use komodo_client::entities::alert::{Alert, SeverityLevel};
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -21,7 +19,7 @@ fn default_port() -> u16 {
|
||||
}
|
||||
|
||||
async fn app() -> anyhow::Result<()> {
|
||||
dotenv::dotenv().ok();
|
||||
dotenvy::dotenv().ok();
|
||||
logger::init(&Default::default())?;
|
||||
|
||||
let Env { port } =
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "monitor_cli"
|
||||
description = "Command line tool to sync monitor resources and execute file defined procedures"
|
||||
name = "komodo_cli"
|
||||
description = "Command line tool to execute Komodo actions"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
@@ -9,26 +9,21 @@ homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "monitor"
|
||||
name = "komodo"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client.workspace = true
|
||||
# mogh
|
||||
partial_derive2.workspace = true
|
||||
komodo_client.workspace = true
|
||||
# external
|
||||
tracing-subscriber.workspace = true
|
||||
merge_config_files.workspace = true
|
||||
serde_json.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
colored.workspace = true
|
||||
anyhow.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
toml.workspace = true
|
||||
clap.workspace = true
|
||||
|
||||
@@ -1,20 +1,22 @@
|
||||
# Monitor CLI
|
||||
# Komodo CLI
|
||||
|
||||
Monitor CLI is a tool to sync monitor resources and execute operations.
|
||||
Komodo CLI is a tool to execute actions on your Komodo instance from shell scripts.
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
cargo install monitor_cli
|
||||
cargo install komodo_cli
|
||||
```
|
||||
|
||||
Note: On Ubuntu, also requires `apt install build-essential pkg-config libssl-dev`.
|
||||
|
||||
## Usage
|
||||
|
||||
### Credentials
|
||||
|
||||
Configure a file `~/.config/monitor/creds.toml` file with contents:
|
||||
Configure a file `~/.config/komodo/creds.toml` file with contents:
|
||||
```toml
|
||||
url = "https://your.monitor.address"
|
||||
url = "https://your.komodo.address"
|
||||
key = "YOUR-API-KEY"
|
||||
secret = "YOUR-API-SECRET"
|
||||
```
|
||||
@@ -23,64 +25,88 @@ Note. You can specify a different creds file by using `--creds ./other/path.toml
|
||||
You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`:
|
||||
|
||||
```sh
|
||||
monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
|
||||
```
|
||||
|
||||
### Run Syncs
|
||||
|
||||
```sh
|
||||
## Sync resources in a single file
|
||||
monitor sync ./resources/deployments.toml
|
||||
|
||||
## Sync resources gathered across multiple files in a directory
|
||||
monitor sync ./resources
|
||||
|
||||
## Path defaults to './resources', in this case you can just use:
|
||||
monitor sync
|
||||
```
|
||||
|
||||
#### Manual
|
||||
```md
|
||||
Runs syncs on resource files
|
||||
|
||||
Usage: monitor sync [OPTIONS] [PATH]
|
||||
|
||||
Arguments:
|
||||
[PATH] The path of the resource folder / file Folder paths will recursively incorporate all the resources it finds under the folder [default: ./resources]
|
||||
|
||||
Options:
|
||||
--delete Will delete any resources that aren't included in the resource files
|
||||
-h, --help Print help
|
||||
komodo --url "https://your.komodo.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
|
||||
```
|
||||
|
||||
### Run Executions
|
||||
|
||||
```sh
|
||||
# Triggers an example build
|
||||
monitor execute run-build test_build
|
||||
komodo execute run-build test_build
|
||||
```
|
||||
|
||||
#### Manual
|
||||
`komodo --help`
|
||||
```md
|
||||
Command line tool to execute Komodo actions
|
||||
|
||||
Usage: komodo [OPTIONS] <COMMAND>
|
||||
|
||||
Commands:
|
||||
execute Runs an execution
|
||||
help Print this message or the help of the given subcommand(s)
|
||||
|
||||
Options:
|
||||
--creds <CREDS> The path to a creds file [default: /Users/max/.config/komodo/creds.toml]
|
||||
--url <URL> Pass url in args instead of creds file
|
||||
--key <KEY> Pass api key in args instead of creds file
|
||||
--secret <SECRET> Pass api secret in args instead of creds file
|
||||
-y, --yes Always continue on user confirmation prompts
|
||||
-h, --help Print help (see more with '--help')
|
||||
-V, --version Print version
|
||||
```
|
||||
|
||||
`komodo execute --help`
|
||||
```md
|
||||
Runs an execution
|
||||
|
||||
Usage: monitor execute <COMMAND>
|
||||
Usage: komodo execute <COMMAND>
|
||||
|
||||
Commands:
|
||||
none The "null" execution. Does nothing
|
||||
run-procedure Runs the target procedure. Response: [Update]
|
||||
run-build Runs the target build. Response: [Update]
|
||||
deploy Deploys the container for the target deployment. Response: [Update]
|
||||
start-container Starts the container for the target deployment. Response: [Update]
|
||||
stop-container Stops the container for the target deployment. Response: [Update]
|
||||
stop-all-containers Stops all deployments on the target server. Response: [Update]
|
||||
remove-container Stops and removes the container for the target deployment. Reponse: [Update]
|
||||
clone-repo Clones the target repo. Response: [Update]
|
||||
pull-repo Pulls the target repo. Response: [Update]
|
||||
prune-networks Prunes the docker networks on the target server. Response: [Update]
|
||||
prune-images Prunes the docker images on the target server. Response: [Update]
|
||||
prune-containers Prunes the docker containers on the target server. Response: [Update]
|
||||
help Print this message or the help of the given subcommand(s)
|
||||
none The "null" execution. Does nothing
|
||||
run-procedure Runs the target procedure. Response: [Update]
|
||||
run-build Runs the target build. Response: [Update]
|
||||
cancel-build Cancels the target build. Only does anything if the build is `building` when called. Response: [Update]
|
||||
deploy Deploys the container for the target deployment. Response: [Update]
|
||||
start-deployment Starts the container for the target deployment. Response: [Update]
|
||||
restart-deployment Restarts the container for the target deployment. Response: [Update]
|
||||
pause-deployment Pauses the container for the target deployment. Response: [Update]
|
||||
unpause-deployment Unpauses the container for the target deployment. Response: [Update]
|
||||
stop-deployment Stops the container for the target deployment. Response: [Update]
|
||||
destroy-deployment Stops and destroys the container for the target deployment. Reponse: [Update]
|
||||
clone-repo Clones the target repo. Response: [Update]
|
||||
pull-repo Pulls the target repo. Response: [Update]
|
||||
build-repo Builds the target repo, using the attached builder. Response: [Update]
|
||||
cancel-repo-build Cancels the target repo build. Only does anything if the repo build is `building` when called. Response: [Update]
|
||||
start-container Starts the container on the target server. Response: [Update]
|
||||
restart-container Restarts the container on the target server. Response: [Update]
|
||||
pause-container Pauses the container on the target server. Response: [Update]
|
||||
unpause-container Unpauses the container on the target server. Response: [Update]
|
||||
stop-container Stops the container on the target server. Response: [Update]
|
||||
destroy-container Stops and destroys the container on the target server. Reponse: [Update]
|
||||
start-all-containers Starts all containers on the target server. Response: [Update]
|
||||
restart-all-containers Restarts all containers on the target server. Response: [Update]
|
||||
pause-all-containers Pauses all containers on the target server. Response: [Update]
|
||||
unpause-all-containers Unpauses all containers on the target server. Response: [Update]
|
||||
stop-all-containers Stops all containers on the target server. Response: [Update]
|
||||
prune-containers Prunes the docker containers on the target server. Response: [Update]
|
||||
delete-network Delete a docker network. Response: [Update]
|
||||
prune-networks Prunes the docker networks on the target server. Response: [Update]
|
||||
delete-image Delete a docker image. Response: [Update]
|
||||
prune-images Prunes the docker images on the target server. Response: [Update]
|
||||
delete-volume Delete a docker volume. Response: [Update]
|
||||
prune-volumes Prunes the docker volumes on the target server. Response: [Update]
|
||||
prune-system Prunes the docker system on the target server, including volumes. Response: [Update]
|
||||
run-sync Runs the target resource sync. Response: [Update]
|
||||
deploy-stack Deploys the target stack. `docker compose up`. Response: [Update]
|
||||
start-stack Starts the target stack. `docker compose start`. Response: [Update]
|
||||
restart-stack Restarts the target stack. `docker compose restart`. Response: [Update]
|
||||
pause-stack Pauses the target stack. `docker compose pause`. Response: [Update]
|
||||
unpause-stack Unpauses the target stack. `docker compose unpause`. Response: [Update]
|
||||
stop-stack Starts the target stack. `docker compose stop`. Response: [Update]
|
||||
destroy-stack Destoys the target stack. `docker compose down`. Response: [Update]
|
||||
sleep
|
||||
help Print this message or the help of the given subcommand(s)
|
||||
|
||||
Options:
|
||||
-h, --help Print help
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use monitor_client::api::execute::Execution;
|
||||
use komodo_client::api::execute::Execution;
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
@@ -32,30 +32,19 @@ pub struct CliArgs {
|
||||
}
|
||||
|
||||
fn default_creds() -> String {
|
||||
let home = std::env::var("HOME")
|
||||
.expect("no HOME env var. cannot get default config path.");
|
||||
format!("{home}/.config/monitor/creds.toml")
|
||||
let home =
|
||||
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
|
||||
format!("{home}/.config/komodo/creds.toml")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
pub enum Command {
|
||||
/// Runs syncs on resource files
|
||||
Sync {
|
||||
/// The path of the resource folder / file
|
||||
/// Folder paths will recursively incorporate all the resources it finds under the folder
|
||||
#[arg(default_value_t = String::from("./resources"))]
|
||||
path: String,
|
||||
|
||||
/// Will delete any resources that aren't included in the resource files.
|
||||
#[arg(long, default_value_t = false)]
|
||||
delete: bool,
|
||||
},
|
||||
|
||||
/// Runs an execution
|
||||
Execute {
|
||||
#[command(subcommand)]
|
||||
execution: Execution,
|
||||
},
|
||||
// Room for more
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::api::execute::Execution;
|
||||
use komodo_client::api::execute::Execution;
|
||||
|
||||
use crate::{
|
||||
helpers::wait_for_enter,
|
||||
state::{cli_args, monitor_client},
|
||||
state::{cli_args, komodo_client},
|
||||
};
|
||||
|
||||
pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
@@ -27,19 +27,28 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
Execution::RunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Deploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
Execution::StartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
Execution::RestartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
Execution::PauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RemoveContainer(data) => {
|
||||
Execution::UnpauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CloneRepo(data) => {
|
||||
@@ -48,18 +57,99 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
Execution::PullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
Execution::BuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
Execution::CancelRepoBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteNetwork(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteImage(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteVolume(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneVolumes(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneDockerBuilders(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneBuildx(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneSystem(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
@@ -73,43 +163,133 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
|
||||
let res = match execution {
|
||||
Execution::RunProcedure(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::RunBuild(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::CancelBuild(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::Deploy(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StartContainer(request) => {
|
||||
monitor_client().execute(request).await
|
||||
Execution::StartDeployment(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StopContainer(request) => {
|
||||
monitor_client().execute(request).await
|
||||
Execution::RestartDeployment(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StopAllContainers(request) => {
|
||||
monitor_client().execute(request).await
|
||||
Execution::PauseDeployment(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::RemoveContainer(request) => {
|
||||
monitor_client().execute(request).await
|
||||
Execution::UnpauseDeployment(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StopDeployment(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DestroyDeployment(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::CloneRepo(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PullRepo(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneNetworks(request) => {
|
||||
monitor_client().execute(request).await
|
||||
Execution::BuildRepo(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneImages(request) => {
|
||||
monitor_client().execute(request).await
|
||||
Execution::CancelRepoBuild(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StartContainer(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::RestartContainer(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PauseContainer(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::UnpauseContainer(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StopContainer(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DestroyContainer(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StartAllContainers(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::RestartAllContainers(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PauseAllContainers(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::UnpauseAllContainers(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StopAllContainers(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneContainers(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DeleteNetwork(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneNetworks(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DeleteImage(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneImages(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DeleteVolume(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneVolumes(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneDockerBuilders(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneBuildx(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PruneSystem(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::RunSync(request) => {
|
||||
monitor_client().execute(request).await
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DeployStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StartStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::RestartStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::PauseStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::UnpauseStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::StopStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::DestroyStack(request) => {
|
||||
komodo_client().execute(request).await
|
||||
}
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
|
||||
@@ -2,27 +2,27 @@
|
||||
extern crate tracing;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::api::read::GetVersion;
|
||||
use komodo_client::api::read::GetVersion;
|
||||
|
||||
mod args;
|
||||
mod exec;
|
||||
mod helpers;
|
||||
mod maps;
|
||||
mod state;
|
||||
mod sync;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt().with_target(false).init();
|
||||
|
||||
info!(
|
||||
"Komodo CLI version: {}",
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
|
||||
let version =
|
||||
state::monitor_client().read(GetVersion {}).await?.version;
|
||||
info!("monitor version: {}", version.to_string().blue().bold());
|
||||
state::komodo_client().read(GetVersion {}).await?.version;
|
||||
info!("Komodo Core version: {}", version.blue().bold());
|
||||
|
||||
match &state::cli_args().command {
|
||||
args::Command::Sync { path, delete } => {
|
||||
sync::run(path, *delete).await?
|
||||
}
|
||||
args::Command::Execute { execution } => {
|
||||
exec::run(execution.to_owned()).await?
|
||||
}
|
||||
|
||||
@@ -1,328 +0,0 @@
|
||||
use std::{collections::HashMap, sync::OnceLock};
|
||||
|
||||
use monitor_client::{
|
||||
api::read,
|
||||
entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
sync::ResourceSync, tag::Tag, user::User, user_group::UserGroup,
|
||||
variable::Variable,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::state::monitor_client;
|
||||
|
||||
pub fn name_to_build() -> &'static HashMap<String, Build> {
|
||||
static NAME_TO_BUILD: OnceLock<HashMap<String, Build>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_BUILD.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullBuilds::default()),
|
||||
)
|
||||
.expect("failed to get builds from monitor")
|
||||
.into_iter()
|
||||
.map(|build| (build.name.clone(), build))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_build() -> &'static HashMap<String, Build> {
|
||||
static ID_TO_BUILD: OnceLock<HashMap<String, Build>> =
|
||||
OnceLock::new();
|
||||
ID_TO_BUILD.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullBuilds::default()),
|
||||
)
|
||||
.expect("failed to get builds from monitor")
|
||||
.into_iter()
|
||||
.map(|build| (build.id.clone(), build))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_deployment() -> &'static HashMap<String, Deployment> {
|
||||
static NAME_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_DEPLOYMENT.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullDeployments::default()),
|
||||
)
|
||||
.expect("failed to get deployments from monitor")
|
||||
.into_iter()
|
||||
.map(|deployment| (deployment.name.clone(), deployment))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_deployment() -> &'static HashMap<String, Deployment> {
|
||||
static ID_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
|
||||
OnceLock::new();
|
||||
ID_TO_DEPLOYMENT.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullDeployments::default()),
|
||||
)
|
||||
.expect("failed to get deployments from monitor")
|
||||
.into_iter()
|
||||
.map(|deployment| (deployment.id.clone(), deployment))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_server() -> &'static HashMap<String, Server> {
|
||||
static NAME_TO_SERVER: OnceLock<HashMap<String, Server>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_SERVER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullServers::default()),
|
||||
)
|
||||
.expect("failed to get servers from monitor")
|
||||
.into_iter()
|
||||
.map(|server| (server.name.clone(), server))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_server() -> &'static HashMap<String, Server> {
|
||||
static ID_TO_SERVER: OnceLock<HashMap<String, Server>> =
|
||||
OnceLock::new();
|
||||
ID_TO_SERVER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullServers::default()),
|
||||
)
|
||||
.expect("failed to get servers from monitor")
|
||||
.into_iter()
|
||||
.map(|server| (server.id.clone(), server))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_builder() -> &'static HashMap<String, Builder> {
|
||||
static NAME_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_BUILDER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullBuilders::default()),
|
||||
)
|
||||
.expect("failed to get builders from monitor")
|
||||
.into_iter()
|
||||
.map(|builder| (builder.name.clone(), builder))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_builder() -> &'static HashMap<String, Builder> {
|
||||
static ID_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
|
||||
OnceLock::new();
|
||||
ID_TO_BUILDER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullBuilders::default()),
|
||||
)
|
||||
.expect("failed to get builders from monitor")
|
||||
.into_iter()
|
||||
.map(|builder| (builder.id.clone(), builder))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_alerter() -> &'static HashMap<String, Alerter> {
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullAlerters::default()),
|
||||
)
|
||||
.expect("failed to get alerters from monitor")
|
||||
.into_iter()
|
||||
.map(|alerter| (alerter.name.clone(), alerter))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_alerter() -> &'static HashMap<String, Alerter> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
|
||||
OnceLock::new();
|
||||
ID_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullAlerters::default()),
|
||||
)
|
||||
.expect("failed to get alerters from monitor")
|
||||
.into_iter()
|
||||
.map(|alerter| (alerter.id.clone(), alerter))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_repo() -> &'static HashMap<String, Repo> {
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullRepos::default()),
|
||||
)
|
||||
.expect("failed to get repos from monitor")
|
||||
.into_iter()
|
||||
.map(|repo| (repo.name.clone(), repo))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_repo() -> &'static HashMap<String, Repo> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
|
||||
OnceLock::new();
|
||||
ID_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullRepos::default()),
|
||||
)
|
||||
.expect("failed to get repos from monitor")
|
||||
.into_iter()
|
||||
.map(|repo| (repo.id.clone(), repo))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_procedure() -> &'static HashMap<String, Procedure> {
|
||||
static NAME_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_PROCEDURE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullProcedures::default()),
|
||||
)
|
||||
.expect("failed to get procedures from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.name.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_procedure() -> &'static HashMap<String, Procedure> {
|
||||
static ID_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
|
||||
OnceLock::new();
|
||||
ID_TO_PROCEDURE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullProcedures::default()),
|
||||
)
|
||||
.expect("failed to get procedures from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.id.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_server_template(
|
||||
) -> &'static HashMap<String, ServerTemplate> {
|
||||
static NAME_TO_SERVER_TEMPLATE: OnceLock<
|
||||
HashMap<String, ServerTemplate>,
|
||||
> = OnceLock::new();
|
||||
NAME_TO_SERVER_TEMPLATE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullServerTemplates::default()),
|
||||
)
|
||||
.expect("failed to get server templates from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.name.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_server_template(
|
||||
) -> &'static HashMap<String, ServerTemplate> {
|
||||
static ID_TO_SERVER_TEMPLATE: OnceLock<
|
||||
HashMap<String, ServerTemplate>,
|
||||
> = OnceLock::new();
|
||||
ID_TO_SERVER_TEMPLATE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullServerTemplates::default()),
|
||||
)
|
||||
.expect("failed to get server templates from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.id.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_resource_sync(
|
||||
) -> &'static HashMap<String, ResourceSync> {
|
||||
static NAME_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_SYNC.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullResourceSyncs::default()),
|
||||
)
|
||||
.expect("failed to get syncs from monitor")
|
||||
.into_iter()
|
||||
.map(|sync| (sync.name.clone(), sync))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_resource_sync() -> &'static HashMap<String, ResourceSync>
|
||||
{
|
||||
static ID_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
|
||||
OnceLock::new();
|
||||
ID_TO_SYNC.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullResourceSyncs::default()),
|
||||
)
|
||||
.expect("failed to get syncs from monitor")
|
||||
.into_iter()
|
||||
.map(|sync| (sync.id.clone(), sync))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
|
||||
static NAME_TO_USER_GROUP: OnceLock<HashMap<String, UserGroup>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_USER_GROUP.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListUserGroups::default()),
|
||||
)
|
||||
.expect("failed to get user groups from monitor")
|
||||
.into_iter()
|
||||
.map(|user_group| (user_group.name.clone(), user_group))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_variable() -> &'static HashMap<String, Variable> {
|
||||
static NAME_TO_VARIABLE: OnceLock<HashMap<String, Variable>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_VARIABLE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListVariables::default()),
|
||||
)
|
||||
.expect("failed to get user groups from monitor")
|
||||
.variables
|
||||
.into_iter()
|
||||
.map(|variable| (variable.name.clone(), variable))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_user() -> &'static HashMap<String, User> {
|
||||
static ID_TO_USER: OnceLock<HashMap<String, User>> =
|
||||
OnceLock::new();
|
||||
ID_TO_USER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListUsers::default()),
|
||||
)
|
||||
.expect("failed to get users from monitor")
|
||||
.into_iter()
|
||||
.map(|user| (user.id.clone(), user))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_tag() -> &'static HashMap<String, Tag> {
|
||||
static ID_TO_TAG: OnceLock<HashMap<String, Tag>> = OnceLock::new();
|
||||
ID_TO_TAG.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListTags::default()),
|
||||
)
|
||||
.expect("failed to get tags from monitor")
|
||||
.into_iter()
|
||||
.map(|tag| (tag.id.clone(), tag))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
@@ -1,17 +1,17 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use clap::Parser;
|
||||
use komodo_client::KomodoClient;
|
||||
use merge_config_files::parse_config_file;
|
||||
use monitor_client::MonitorClient;
|
||||
|
||||
pub fn cli_args() -> &'static crate::args::CliArgs {
|
||||
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn monitor_client() -> &'static MonitorClient {
|
||||
static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
|
||||
MONITOR_CLIENT.get_or_init(|| {
|
||||
pub fn komodo_client() -> &'static KomodoClient {
|
||||
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
|
||||
KOMODO_CLIENT.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let crate::args::CredsFile { url, key, secret } =
|
||||
match (&args.url, &args.key, &args.secret) {
|
||||
@@ -25,7 +25,7 @@ pub fn monitor_client() -> &'static MonitorClient {
|
||||
(url, key, secret) => {
|
||||
let mut creds: crate::args::CredsFile =
|
||||
parse_config_file(cli_args().creds.as_str())
|
||||
.expect("failed to parse monitor credentials");
|
||||
.expect("failed to parse Komodo credentials");
|
||||
|
||||
if let Some(url) = url {
|
||||
creds.url.clone_from(url);
|
||||
@@ -40,7 +40,7 @@ pub fn monitor_client() -> &'static MonitorClient {
|
||||
creds
|
||||
}
|
||||
};
|
||||
futures::executor::block_on(MonitorClient::new(url, key, secret))
|
||||
.expect("failed to initialize monitor client")
|
||||
futures::executor::block_on(KomodoClient::new(url, key, secret))
|
||||
.expect("failed to initialize Komodo client")
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
use std::{
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use colored::Colorize;
|
||||
use monitor_client::entities::toml::ResourcesToml;
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
pub fn read_resources(path: &str) -> anyhow::Result<ResourcesToml> {
|
||||
let mut res = ResourcesToml::default();
|
||||
let path =
|
||||
PathBuf::from_str(path).context("invalid resources path")?;
|
||||
read_resources_recursive(&path, &mut res)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
fn read_resources_recursive(
|
||||
path: &Path,
|
||||
resources: &mut ResourcesToml,
|
||||
) -> anyhow::Result<()> {
|
||||
let res =
|
||||
fs::metadata(path).context("failed to get path metadata")?;
|
||||
if res.is_file() {
|
||||
if !path
|
||||
.extension()
|
||||
.map(|ext| ext == "toml")
|
||||
.unwrap_or_default()
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
let more = match parse_toml_file::<ResourcesToml>(path) {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
warn!("failed to parse {:?}. skipping file | {e:#}", path);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
info!(
|
||||
"{} from {}",
|
||||
"adding resources".green().bold(),
|
||||
path.display().to_string().blue().bold()
|
||||
);
|
||||
resources.servers.extend(more.servers);
|
||||
resources.deployments.extend(more.deployments);
|
||||
resources.builds.extend(more.builds);
|
||||
resources.repos.extend(more.repos);
|
||||
resources.procedures.extend(more.procedures);
|
||||
resources.builders.extend(more.builders);
|
||||
resources.alerters.extend(more.alerters);
|
||||
resources.server_templates.extend(more.server_templates);
|
||||
resources.resource_syncs.extend(more.resource_syncs);
|
||||
resources.user_groups.extend(more.user_groups);
|
||||
resources.variables.extend(more.variables);
|
||||
Ok(())
|
||||
} else if res.is_dir() {
|
||||
let directory = fs::read_dir(path)
|
||||
.context("failed to read directory contents")?;
|
||||
for entry in directory.into_iter().flatten() {
|
||||
if let Err(e) =
|
||||
read_resources_recursive(&entry.path(), resources)
|
||||
{
|
||||
warn!("failed to read additional resources at path | {e:#}");
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("resources path is neither file nor directory"))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_toml_file<T: DeserializeOwned>(
|
||||
path: impl AsRef<std::path::Path>,
|
||||
) -> anyhow::Result<T> {
|
||||
let contents = std::fs::read_to_string(path)
|
||||
.context("failed to read file contents")?;
|
||||
toml::from_str(&contents).context("failed to parse toml contents")
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
use colored::Colorize;
|
||||
use monitor_client::entities::{
|
||||
self, alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
};
|
||||
|
||||
use crate::{helpers::wait_for_enter, state::cli_args};
|
||||
|
||||
mod file;
|
||||
mod resource;
|
||||
mod resources;
|
||||
mod user_group;
|
||||
mod variables;
|
||||
|
||||
use resource::ResourceSync;
|
||||
|
||||
pub async fn run(path: &str, delete: bool) -> anyhow::Result<()> {
|
||||
info!("resources path: {}", path.blue().bold());
|
||||
if delete {
|
||||
warn!("Delete mode {}", "enabled".bold());
|
||||
}
|
||||
|
||||
let resources = file::read_resources(path)?;
|
||||
|
||||
info!("computing sync actions...");
|
||||
|
||||
let (server_creates, server_updates, server_deletes) =
|
||||
resource::get_updates::<Server>(resources.servers, delete)?;
|
||||
let (deployment_creates, deployment_updates, deployment_deletes) =
|
||||
resource::get_updates::<Deployment>(
|
||||
resources.deployments,
|
||||
delete,
|
||||
)?;
|
||||
let (build_creates, build_updates, build_deletes) =
|
||||
resource::get_updates::<Build>(resources.builds, delete)?;
|
||||
let (repo_creates, repo_updates, repo_deletes) =
|
||||
resource::get_updates::<Repo>(resources.repos, delete)?;
|
||||
let (procedure_creates, procedure_updates, procedure_deletes) =
|
||||
resource::get_updates::<Procedure>(resources.procedures, delete)?;
|
||||
let (builder_creates, builder_updates, builder_deletes) =
|
||||
resource::get_updates::<Builder>(resources.builders, delete)?;
|
||||
let (alerter_creates, alerter_updates, alerter_deletes) =
|
||||
resource::get_updates::<Alerter>(resources.alerters, delete)?;
|
||||
let (
|
||||
server_template_creates,
|
||||
server_template_updates,
|
||||
server_template_deletes,
|
||||
) = resource::get_updates::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
)?;
|
||||
let (
|
||||
resource_sync_creates,
|
||||
resource_sync_updates,
|
||||
resource_sync_deletes,
|
||||
) = resource::get_updates::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
delete,
|
||||
)?;
|
||||
|
||||
let (variable_creates, variable_updates, variable_deletes) =
|
||||
variables::get_updates(resources.variables, delete)?;
|
||||
|
||||
let (user_group_creates, user_group_updates, user_group_deletes) =
|
||||
user_group::get_updates(resources.user_groups, delete).await?;
|
||||
|
||||
if resource_sync_creates.is_empty()
|
||||
&& resource_sync_updates.is_empty()
|
||||
&& resource_sync_deletes.is_empty()
|
||||
&& server_template_creates.is_empty()
|
||||
&& server_template_updates.is_empty()
|
||||
&& server_template_deletes.is_empty()
|
||||
&& server_creates.is_empty()
|
||||
&& server_updates.is_empty()
|
||||
&& server_deletes.is_empty()
|
||||
&& deployment_creates.is_empty()
|
||||
&& deployment_updates.is_empty()
|
||||
&& deployment_deletes.is_empty()
|
||||
&& build_creates.is_empty()
|
||||
&& build_updates.is_empty()
|
||||
&& build_deletes.is_empty()
|
||||
&& builder_creates.is_empty()
|
||||
&& builder_updates.is_empty()
|
||||
&& builder_deletes.is_empty()
|
||||
&& alerter_creates.is_empty()
|
||||
&& alerter_updates.is_empty()
|
||||
&& alerter_deletes.is_empty()
|
||||
&& repo_creates.is_empty()
|
||||
&& repo_updates.is_empty()
|
||||
&& repo_deletes.is_empty()
|
||||
&& procedure_creates.is_empty()
|
||||
&& procedure_updates.is_empty()
|
||||
&& procedure_deletes.is_empty()
|
||||
&& user_group_creates.is_empty()
|
||||
&& user_group_updates.is_empty()
|
||||
&& user_group_deletes.is_empty()
|
||||
&& variable_creates.is_empty()
|
||||
&& variable_updates.is_empty()
|
||||
&& variable_deletes.is_empty()
|
||||
{
|
||||
info!("{}. exiting.", "nothing to do".green().bold());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if !cli_args().yes {
|
||||
wait_for_enter("run sync")?;
|
||||
}
|
||||
|
||||
// No deps
|
||||
entities::sync::ResourceSync::run_updates(
|
||||
resource_sync_creates,
|
||||
resource_sync_updates,
|
||||
resource_sync_deletes,
|
||||
)
|
||||
.await;
|
||||
ServerTemplate::run_updates(
|
||||
server_template_creates,
|
||||
server_template_updates,
|
||||
server_template_deletes,
|
||||
)
|
||||
.await;
|
||||
Server::run_updates(server_creates, server_updates, server_deletes)
|
||||
.await;
|
||||
Alerter::run_updates(
|
||||
alerter_creates,
|
||||
alerter_updates,
|
||||
alerter_deletes,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Dependant on server
|
||||
Builder::run_updates(
|
||||
builder_creates,
|
||||
builder_updates,
|
||||
builder_deletes,
|
||||
)
|
||||
.await;
|
||||
Repo::run_updates(repo_creates, repo_updates, repo_deletes).await;
|
||||
|
||||
// Dependant on builder
|
||||
Build::run_updates(build_creates, build_updates, build_deletes)
|
||||
.await;
|
||||
|
||||
// Dependant on server / build
|
||||
Deployment::run_updates(
|
||||
deployment_creates,
|
||||
deployment_updates,
|
||||
deployment_deletes,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Dependant on everything
|
||||
Procedure::run_updates(
|
||||
procedure_creates,
|
||||
procedure_updates,
|
||||
procedure_deletes,
|
||||
)
|
||||
.await;
|
||||
variables::run_updates(
|
||||
variable_creates,
|
||||
variable_updates,
|
||||
variable_deletes,
|
||||
)
|
||||
.await;
|
||||
user_group::run_updates(
|
||||
user_group_creates,
|
||||
user_group_updates,
|
||||
user_group_deletes,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,358 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::write::{UpdateDescription, UpdateTagsOnResource},
|
||||
entities::{
|
||||
resource::Resource, toml::ResourceToml, update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::maps::id_to_tag;
|
||||
|
||||
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
|
||||
pub type ToCreate<T> = Vec<ResourceToml<T>>;
|
||||
/// Vec of resource names
|
||||
pub type ToDelete = Vec<String>;
|
||||
|
||||
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>, ToDelete);
|
||||
|
||||
pub struct ToUpdateItem<T: Default> {
|
||||
pub id: String,
|
||||
pub resource: ResourceToml<T>,
|
||||
pub update_description: bool,
|
||||
pub update_tags: bool,
|
||||
}
|
||||
|
||||
pub trait ResourceSync: Sized {
|
||||
type Config: Clone
|
||||
+ Default
|
||||
+ Send
|
||||
+ From<Self::PartialConfig>
|
||||
+ PartialDiff<Self::PartialConfig, Self::ConfigDiff>
|
||||
+ 'static;
|
||||
type Info: Default + 'static;
|
||||
type PartialConfig: std::fmt::Debug
|
||||
+ Clone
|
||||
+ Send
|
||||
+ Default
|
||||
+ From<Self::Config>
|
||||
+ From<Self::ConfigDiff>
|
||||
+ Serialize
|
||||
+ MaybeNone
|
||||
+ 'static;
|
||||
type ConfigDiff: Diff + MaybeNone;
|
||||
|
||||
fn display() -> &'static str;
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget;
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>;
|
||||
|
||||
/// Creates the resource and returns created id.
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String>;
|
||||
|
||||
/// Updates the resource at id with the partial config.
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()>;
|
||||
|
||||
/// Apply any changes to incoming toml partial config
|
||||
/// before it is diffed against existing config
|
||||
fn validate_partial_config(_config: &mut Self::PartialConfig) {}
|
||||
|
||||
/// Diffs the declared toml (partial) against the full existing config.
|
||||
/// Removes all fields from toml (partial) that haven't changed.
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff>;
|
||||
|
||||
/// Apply any changes to computed config diff
|
||||
/// before logging
|
||||
fn validate_diff(_diff: &mut Self::ConfigDiff) {}
|
||||
|
||||
/// Deletes the target resource
|
||||
async fn delete(id_or_name: String) -> anyhow::Result<()>;
|
||||
|
||||
  /// Default sync executor: applies the precomputed create / update /
  /// delete lists against the backend, logging the outcome of each call.
  /// Failures are logged with `warn!` and skipped, so one bad resource
  /// does not abort the rest of the sync.
  async fn run_updates(
    to_create: ToCreate<Self::PartialConfig>,
    to_update: ToUpdate<Self::PartialConfig>,
    to_delete: ToDelete,
  ) {
    for resource in to_create {
      // Clone metadata out before `resource` is moved into `create`.
      let name = resource.name.clone();
      let tags = resource.tags.clone();
      let description = resource.description.clone();
      let id = match Self::create(resource).await {
        Ok(id) => id,
        Err(e) => {
          warn!(
            "failed to create {} {name} | {e:#}",
            Self::display(),
          );
          continue;
        }
      };
      // `create` only sends name + config; tags and description
      // are applied in follow-up calls using the returned id.
      run_update_tags::<Self>(id.clone(), &name, tags).await;
      run_update_description::<Self>(id, &name, description).await;
      info!(
        "{} {} '{}'",
        "created".green().bold(),
        Self::display(),
        name.bold(),
      );
    }

    for ToUpdateItem {
      id,
      resource,
      update_description,
      update_tags,
    } in to_update
    {
      // Update resource
      let name = resource.name.clone();
      let tags = resource.tags.clone();
      let description = resource.description.clone();

      if update_description {
        run_update_description::<Self>(
          id.clone(),
          &name,
          description,
        )
        .await;
      }

      if update_tags {
        run_update_tags::<Self>(id.clone(), &name, tags).await;
      }

      // Only send a config update if the diff left any field set.
      if !resource.config.is_none() {
        if let Err(e) = Self::update(id, resource).await {
          warn!(
            "failed to update config on {} {name} | {e:#}",
            Self::display()
          );
        } else {
          info!(
            "{} {} '{}' configuration",
            "updated".blue().bold(),
            Self::display(),
            name.bold(),
          );
        }
      }
    }

    for resource in to_delete {
      // `to_delete` entries are names (see [get_updates]); `delete`
      // accepts id or name.
      if let Err(e) = Self::delete(resource.clone()).await {
        warn!(
          "failed to delete {} {resource} | {e:#}",
          Self::display()
        );
      } else {
        info!(
          "{} {} '{}'",
          "deleted".red().bold(),
          Self::display(),
          resource.bold(),
        );
      }
    }
  }
|
||||
}
|
||||
|
||||
/// Gets all the resources to update, logging along the way.
///
/// Partitions the declared toml resources into (to_create, to_update,
/// to_delete): names absent from the backend map are created, existing
/// names are diffed field-by-field, and (when `delete` is set) backend
/// resources missing from the toml are queued for deletion. Prints a
/// human-readable summary of every planned change.
pub fn get_updates<Resource: ResourceSync>(
  resources: Vec<ResourceToml<Resource::PartialConfig>>,
  delete: bool,
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
  let map = Resource::name_to_resource();

  let mut to_create = ToCreate::<Resource::PartialConfig>::new();
  let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
  let mut to_delete = ToDelete::new();

  if delete {
    // Queue any existing resource whose name is not declared in toml.
    for resource in map.values() {
      if !resources.iter().any(|r| r.name == resource.name) {
        to_delete.push(resource.name.clone());
      }
    }
  }

  for mut resource in resources {
    match map.get(&resource.name) {
      Some(original) => {
        // First merge toml resource config (partial) onto default resource config.
        // Makes sure things that aren't defined in toml (come through as None) actually get removed.
        let config: Resource::Config = resource.config.into();
        resource.config = config.into();

        Resource::validate_partial_config(&mut resource.config);

        let mut diff = Resource::get_diff(
          original.config.clone(),
          resource.config,
        )?;

        Resource::validate_diff(&mut diff);

        // Resolve the original's tag ids to tag names, since the toml
        // declares tags by name (unknown ids are dropped).
        let original_tags = original
          .tags
          .iter()
          .filter_map(|id| {
            id_to_tag().get(id).map(|t| t.name.clone())
          })
          .collect::<Vec<_>>();

        // Only proceed if there are any fields to update,
        // or a change to tags / description
        if diff.is_none()
          && resource.description == original.description
          && resource.tags == original_tags
        {
          continue;
        }

        println!(
          "\n{}: {}: '{}'\n-------------------",
          "UPDATE".blue(),
          Resource::display(),
          resource.name.bold(),
        );
        // Collect one printable "field / from / to" entry per change.
        let mut lines = Vec::<String>::new();
        if resource.description != original.description {
          lines.push(format!(
            "{}: 'description'\n{}: {}\n{}: {}",
            "field".dimmed(),
            "from".dimmed(),
            original.description.red(),
            "to".dimmed(),
            resource.description.green()
          ))
        }
        if resource.tags != original_tags {
          let from = format!("{:?}", original_tags).red();
          let to = format!("{:?}", resource.tags).green();
          lines.push(format!(
            "{}: 'tags'\n{}: {from}\n{}: {to}",
            "field".dimmed(),
            "from".dimmed(),
            "to".dimmed(),
          ));
        }
        lines.extend(diff.iter_field_diffs().map(
          |FieldDiff { field, from, to }| {
            format!(
              "{}: '{field}'\n{}: {}\n{}: {}",
              "field".dimmed(),
              "from".dimmed(),
              from.red(),
              "to".dimmed(),
              to.green()
            )
          },
        ));
        println!("{}", lines.join("\n-------------------\n"));

        // Minimizes updates through diffing.
        resource.config = diff.into();

        let update = ToUpdateItem {
          id: original.id.clone(),
          update_description: resource.description
            != original.description,
          update_tags: resource.tags != original_tags,
          resource,
        };

        to_update.push(update);
      }
      None => {
        // Name not found on the backend -> create it.
        println!(
          "\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
          "CREATE".green(),
          Resource::display(),
          resource.name.bold().green(),
          "description".dimmed(),
          resource.description,
          "tags".dimmed(),
          resource.tags,
          "config".dimmed(),
          serde_json::to_string_pretty(&resource.config)?
        );
        to_create.push(resource);
      }
    }
  }

  for name in &to_delete {
    println!(
      "\n{}: {}: '{}'\n-------------------",
      "DELETE".red(),
      Resource::display(),
      name.bold(),
    );
  }

  Ok((to_create, to_update, to_delete))
}
|
||||
|
||||
pub async fn run_update_tags<Resource: ResourceSync>(
|
||||
id: String,
|
||||
name: &str,
|
||||
tags: Vec<String>,
|
||||
) {
|
||||
// Update tags
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(UpdateTagsOnResource {
|
||||
target: Resource::resource_target(id),
|
||||
tags,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update tags on {} {name} | {e:#}",
|
||||
Resource::display(),
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} {} '{}' tags",
|
||||
"updated".blue().bold(),
|
||||
Resource::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_update_description<Resource: ResourceSync>(
|
||||
id: String,
|
||||
name: &str,
|
||||
description: String,
|
||||
) {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(UpdateDescription {
|
||||
target: Resource::resource_target(id.clone()),
|
||||
description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to update resource {id} description | {e:#}");
|
||||
} else {
|
||||
info!(
|
||||
"{} {} '{}' description",
|
||||
"updated".blue().bold(),
|
||||
Resource::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,77 +0,0 @@
|
||||
use partial_derive2::PartialDiff;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{CreateAlerter, DeleteAlerter, UpdateAlerter},
|
||||
entities::{
|
||||
alerter::{
|
||||
Alerter, AlerterConfig, AlerterConfigDiff, PartialAlerterConfig,
|
||||
},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
maps::name_to_alerter, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
impl ResourceSync for Alerter {
|
||||
type Config = AlerterConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialAlerterConfig;
|
||||
type ConfigDiff = AlerterConfigDiff;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"alerter"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::Alerter(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_alerter()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateAlerter {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|res| res.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateAlerter {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteAlerter { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{CreateBuild, DeleteBuild, UpdateBuild},
|
||||
entities::{
|
||||
build::{
|
||||
Build, BuildConfig, BuildConfigDiff, BuildInfo,
|
||||
PartialBuildConfig,
|
||||
},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_builder, name_to_build},
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
/// Sync implementation for [Build] resources.
impl ResourceSync for Build {
  type Config = BuildConfig;
  type Info = BuildInfo;
  type PartialConfig = PartialBuildConfig;
  type ConfigDiff = BuildConfigDiff;

  fn display() -> &'static str {
    "build"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Build(id)
  }

  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_build()
  }

  /// Creates the build and returns the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateBuild {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the build at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateBuild {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the builder id with name
    // (toml declares the builder by name; unknown id becomes "")
    original.builder_id = id_to_builder()
      .get(&original.builder_id)
      .map(|b| b.name.clone())
      .unwrap_or_default();

    Ok(original.partial_diff(update))
  }

  /// Drops the version entry from the diff when the update side is
  /// empty, i.e. the toml did not declare a version.
  /// NOTE(review): presumably so the server-managed build version is
  /// not reported/applied as a config change — confirm.
  fn validate_diff(diff: &mut Self::ConfigDiff) {
    if let Some((_, to)) = &diff.version {
      if to.is_none() {
        diff.version = None;
      }
    }
  }

  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteBuild { id }).await?;
    Ok(())
  }
}
|
||||
@@ -1,86 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{CreateBuilder, DeleteBuilder, UpdateBuilder},
|
||||
entities::{
|
||||
builder::{
|
||||
Builder, BuilderConfig, BuilderConfigDiff, PartialBuilderConfig,
|
||||
},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_server, name_to_builder},
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
/// Sync implementation for [Builder] resources.
impl ResourceSync for Builder {
  type Config = BuilderConfig;
  type Info = ();
  type PartialConfig = PartialBuilderConfig;
  type ConfigDiff = BuilderConfigDiff;

  fn display() -> &'static str {
    "builder"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Builder(id)
  }

  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_builder()
  }

  /// Creates the builder and returns the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateBuilder {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the builder at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateBuilder {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace server builder id with name
    // (only the Server variant holds a server reference;
    // unknown id becomes "")
    if let BuilderConfig::Server(config) = &mut original {
      config.server_id = id_to_server()
        .get(&config.server_id)
        .map(|s| s.name.clone())
        .unwrap_or_default();
    }

    Ok(original.partial_diff(update))
  }

  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteBuilder { id }).await?;
    Ok(())
  }
}
|
||||
@@ -1,98 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{self, DeleteDeployment},
|
||||
entities::{
|
||||
deployment::{
|
||||
Deployment, DeploymentConfig, DeploymentConfigDiff,
|
||||
DeploymentImage, PartialDeploymentConfig,
|
||||
},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_build, id_to_server, name_to_deployment},
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
/// Sync implementation for [Deployment] resources.
impl ResourceSync for Deployment {
  type Config = DeploymentConfig;
  type Info = ();
  type PartialConfig = PartialDeploymentConfig;
  type ConfigDiff = DeploymentConfigDiff;

  fn display() -> &'static str {
    "deployment"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Deployment(id)
  }

  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_deployment()
  }

  /// Creates the deployment and returns the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(write::CreateDeployment {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the deployment at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(write::UpdateDeployment {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Replaces cross-resource ids (server, build) with names before
  /// diffing, since the toml declares them by name. Unknown ids
  /// become "".
  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the server id with name
    original.server_id = id_to_server()
      .get(&original.server_id)
      .map(|s| s.name.clone())
      .unwrap_or_default();

    // need to replace the build id with name
    // (only applies when the image is attached to a build)
    if let DeploymentImage::Build { build_id, version } =
      &original.image
    {
      original.image = DeploymentImage::Build {
        build_id: id_to_build()
          .get(build_id)
          .map(|b| b.name.clone())
          .unwrap_or_default(),
        version: *version,
      };
    }

    Ok(original.partial_diff(update))
  }

  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteDeployment { id }).await?;
    Ok(())
  }
}
|
||||
@@ -1,9 +0,0 @@
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod sync;
|
||||
@@ -1,275 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
execute::Execution,
|
||||
write::{CreateProcedure, DeleteProcedure, UpdateProcedure},
|
||||
},
|
||||
entities::{
|
||||
procedure::{
|
||||
PartialProcedureConfig, Procedure, ProcedureConfig,
|
||||
ProcedureConfigDiff,
|
||||
},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::{MaybeNone, PartialDiff};
|
||||
|
||||
use crate::{
|
||||
maps::{
|
||||
id_to_build, id_to_deployment, id_to_procedure, id_to_repo,
|
||||
id_to_resource_sync, id_to_server, name_to_procedure,
|
||||
},
|
||||
state::monitor_client,
|
||||
sync::resource::{
|
||||
run_update_description, run_update_tags, ResourceSync, ToCreate,
|
||||
ToDelete, ToUpdate, ToUpdateItem,
|
||||
},
|
||||
};
|
||||
|
||||
impl ResourceSync for Procedure {
|
||||
type Config = ProcedureConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialProcedureConfig;
|
||||
type ConfigDiff = ProcedureConfigDiff;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"procedure"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::Procedure(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_procedure()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateProcedure {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|p| p.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateProcedure {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_updates(
|
||||
mut to_create: ToCreate<Self::PartialConfig>,
|
||||
mut to_update: ToUpdate<Self::PartialConfig>,
|
||||
to_delete: ToDelete,
|
||||
) {
|
||||
for name in to_delete {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(DeleteProcedure { id: name.clone() })
|
||||
.await
|
||||
{
|
||||
warn!("failed to delete procedure {name} | {e:#}",);
|
||||
} else {
|
||||
info!(
|
||||
"{} procedure '{}'",
|
||||
"deleted".red().bold(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if to_update.is_empty() && to_create.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for i in 0..10 {
|
||||
let mut to_pull = Vec::new();
|
||||
for ToUpdateItem {
|
||||
id,
|
||||
resource,
|
||||
update_description,
|
||||
update_tags,
|
||||
} in &to_update
|
||||
{
|
||||
// Update resource
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
if *update_description {
|
||||
run_update_description::<Procedure>(
|
||||
id.clone(),
|
||||
&name,
|
||||
description,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
if *update_tags {
|
||||
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
|
||||
}
|
||||
if !resource.config.is_none() {
|
||||
if let Err(e) =
|
||||
Self::update(id.clone(), resource.clone()).await
|
||||
{
|
||||
if i == 9 {
|
||||
warn!(
|
||||
"failed to update {} {name} | {e:#}",
|
||||
Self::display()
|
||||
);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
info!("{} {name} updated", Self::display());
|
||||
// have to clone out so to_update is mutable
|
||||
to_pull.push(id.clone());
|
||||
}
|
||||
//
|
||||
to_update.retain(|resource| !to_pull.contains(&resource.id));
|
||||
|
||||
let mut to_pull = Vec::new();
|
||||
for resource in &to_create {
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
let id = match Self::create(resource.clone()).await {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
if i == 9 {
|
||||
warn!(
|
||||
"failed to create {} {name} | {e:#}",
|
||||
Self::display(),
|
||||
);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
};
|
||||
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
|
||||
run_update_description::<Procedure>(id, &name, description)
|
||||
.await;
|
||||
info!("{} {name} created", Self::display());
|
||||
to_pull.push(name);
|
||||
}
|
||||
to_create.retain(|resource| !to_pull.contains(&resource.name));
|
||||
|
||||
if to_update.is_empty() && to_create.is_empty() {
|
||||
// info!("all procedures synced");
|
||||
return;
|
||||
}
|
||||
}
|
||||
warn!("procedure sync loop exited after max iterations");
|
||||
}
|
||||
|
||||
fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
for stage in &mut original.stages {
|
||||
for execution in &mut stage.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::None(_) | Execution::Sleep(_) => {}
|
||||
Execution::RunProcedure(config) => {
|
||||
config.procedure = id_to_procedure()
|
||||
.get(&config.procedure)
|
||||
.map(|p| p.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunBuild(config) => {
|
||||
config.build = id_to_build()
|
||||
.get(&config.build)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::Deploy(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StartContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RemoveContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::CloneRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PullRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopAllContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneNetworks(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneImages(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunSync(config) => {
|
||||
config.sync = id_to_resource_sync()
|
||||
.get(&config.sync)
|
||||
.map(|s| s.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(_: String) -> anyhow::Result<()> {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{CreateRepo, DeleteRepo, UpdateRepo},
|
||||
entities::{
|
||||
repo::{
|
||||
PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo,
|
||||
},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_server, name_to_repo},
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
/// Sync implementation for [Repo] resources.
impl ResourceSync for Repo {
  type Config = RepoConfig;
  type Info = RepoInfo;
  type PartialConfig = PartialRepoConfig;
  type ConfigDiff = RepoConfigDiff;

  fn display() -> &'static str {
    "repo"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Repo(id)
  }

  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_repo()
  }

  /// Creates the repo and returns the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateRepo {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the repo at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateRepo {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // Need to replace server id with name
    // (toml declares the server by name; unknown id becomes "")
    original.server_id = id_to_server()
      .get(&original.server_id)
      .map(|s| s.name.clone())
      .unwrap_or_default();

    Ok(original.partial_diff(update))
  }

  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteRepo { id }).await?;
    Ok(())
  }
}
|
||||
@@ -1,77 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{CreateServer, DeleteServer, UpdateServer},
|
||||
entities::{
|
||||
resource::Resource,
|
||||
server::{
|
||||
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::name_to_server, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
impl ResourceSync for Server {
|
||||
type Config = ServerConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialServerConfig;
|
||||
type ConfigDiff = ServerConfigDiff;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"server"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::Server(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_server()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateServer {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|res| res.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateServer {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteServer { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,80 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate,
|
||||
},
|
||||
entities::{
|
||||
resource::Resource,
|
||||
server_template::{
|
||||
PartialServerTemplateConfig, ServerTemplate,
|
||||
ServerTemplateConfig, ServerTemplateConfigDiff,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::name_to_server_template, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
/// Sync implementation for [ServerTemplate] resources.
impl ResourceSync for ServerTemplate {
  type Config = ServerTemplateConfig;
  type Info = ();
  type PartialConfig = PartialServerTemplateConfig;
  type ConfigDiff = ServerTemplateConfigDiff;

  fn display() -> &'static str {
    "server template"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::ServerTemplate(id)
  }

  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_server_template()
  }

  /// Creates the server template and returns the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateServerTemplate {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the server template at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateServerTemplate {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Server template config holds no cross-resource ids, so the
  /// diff is a direct partial diff with no preprocessing.
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    Ok(original.partial_diff(update))
  }

  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteServerTemplate { id }).await?;
    Ok(())
  }
}
|
||||
@@ -1,81 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateResourceSync, DeleteResourceSync, UpdateResourceSync,
|
||||
},
|
||||
entities::{
|
||||
self,
|
||||
resource::Resource,
|
||||
sync::{
|
||||
PartialResourceSyncConfig, ResourceSyncConfig,
|
||||
ResourceSyncConfigDiff, ResourceSyncInfo,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::name_to_resource_sync, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
/// Sync implementation for [entities::sync::ResourceSync] resources
/// (the entity shares a name with the local trait, hence the full
/// path on the implementing type).
impl ResourceSync for entities::sync::ResourceSync {
  type Config = ResourceSyncConfig;
  type Info = ResourceSyncInfo;
  type PartialConfig = PartialResourceSyncConfig;
  type ConfigDiff = ResourceSyncConfigDiff;

  fn display() -> &'static str {
    "resource sync"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::ResourceSync(id)
  }

  fn name_to_resource(
  ) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
  {
    name_to_resource_sync()
  }

  /// Creates the resource sync and returns the new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateResourceSync {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the resource sync at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateResourceSync {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Resource sync config holds no cross-resource ids, so the diff
  /// is a direct partial diff with no preprocessing.
  fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    Ok(original.partial_diff(update))
  }

  async fn delete(id: String) -> anyhow::Result<()> {
    monitor_client().write(DeleteResourceSync { id }).await?;
    Ok(())
  }
}
|
||||
@@ -1,388 +0,0 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::ListUserTargetPermissions,
|
||||
write::{
|
||||
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
|
||||
UpdatePermissionOnTarget,
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
permission::UserTarget,
|
||||
toml::{PermissionToml, UserGroupToml},
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::maps::{
|
||||
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
|
||||
id_to_procedure, id_to_repo, id_to_resource_sync, id_to_server,
|
||||
id_to_server_template, id_to_user, name_to_user_group,
|
||||
};
|
||||
|
||||
pub struct UpdateItem {
|
||||
user_group: UserGroupToml,
|
||||
update_users: bool,
|
||||
update_permissions: bool,
|
||||
}
|
||||
|
||||
pub struct DeleteItem {
|
||||
id: String,
|
||||
name: String,
|
||||
}
|
||||
|
||||
pub async fn get_updates(
|
||||
user_groups: Vec<UserGroupToml>,
|
||||
delete: bool,
|
||||
) -> anyhow::Result<(
|
||||
Vec<UserGroupToml>,
|
||||
Vec<UpdateItem>,
|
||||
Vec<DeleteItem>,
|
||||
)> {
|
||||
let map = name_to_user_group();
|
||||
|
||||
let mut to_create = Vec::<UserGroupToml>::new();
|
||||
let mut to_update = Vec::<UpdateItem>::new();
|
||||
let mut to_delete = Vec::<DeleteItem>::new();
|
||||
|
||||
if delete {
|
||||
for user_group in map.values() {
|
||||
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
|
||||
to_delete.push(DeleteItem {
|
||||
id: user_group.id.clone(),
|
||||
name: user_group.name.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let id_to_user = id_to_user();
|
||||
|
||||
for mut user_group in user_groups {
|
||||
let original = match map.get(&user_group.name).cloned() {
|
||||
Some(original) => original,
|
||||
None => {
|
||||
println!(
|
||||
"\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
|
||||
"CREATE".green(),
|
||||
user_group.name.bold().green(),
|
||||
"users".dimmed(),
|
||||
user_group.users,
|
||||
"permissions".dimmed(),
|
||||
user_group.permissions,
|
||||
);
|
||||
to_create.push(user_group);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let mut original_users = original
|
||||
.users
|
||||
.into_iter()
|
||||
.filter_map(|user_id| {
|
||||
id_to_user.get(&user_id).map(|u| u.username.clone())
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut original_permissions = crate::state::monitor_client()
|
||||
.read(ListUserTargetPermissions {
|
||||
user_target: UserTarget::UserGroup(original.id),
|
||||
})
|
||||
.await
|
||||
.context("failed to query for existing UserGroup permissions")?
|
||||
.into_iter()
|
||||
.map(|mut p| {
|
||||
// replace the ids with names
|
||||
match &mut p.resource_target {
|
||||
ResourceTarget::System(_) => {}
|
||||
ResourceTarget::Build(id) => {
|
||||
*id = id_to_build()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
*id = id_to_builder()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
*id = id_to_deployment()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
*id = id_to_server()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
*id = id_to_repo()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
*id = id_to_alerter()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
*id = id_to_procedure()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
*id = id_to_server_template()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
*id = id_to_resource_sync()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
PermissionToml {
|
||||
target: p.resource_target,
|
||||
level: p.level,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
original_users.sort();
|
||||
user_group.users.sort();
|
||||
|
||||
user_group.permissions.sort_by(sort_permissions);
|
||||
original_permissions.sort_by(sort_permissions);
|
||||
|
||||
let update_users = user_group.users != original_users;
|
||||
let update_permissions =
|
||||
user_group.permissions != original_permissions;
|
||||
|
||||
// only push update after failed diff
|
||||
if update_users || update_permissions {
|
||||
println!(
|
||||
"\n{}: user group: '{}'\n-------------------",
|
||||
"UPDATE".blue(),
|
||||
user_group.name.bold(),
|
||||
);
|
||||
let mut lines = Vec::<String>::new();
|
||||
if update_users {
|
||||
let adding = user_group
|
||||
.users
|
||||
.iter()
|
||||
.filter(|user| !original_users.contains(user))
|
||||
.map(|user| user.as_str())
|
||||
.collect::<Vec<_>>();
|
||||
let adding = if adding.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
adding.join(", ").green()
|
||||
};
|
||||
let removing = original_users
|
||||
.iter()
|
||||
.filter(|user| !user_group.users.contains(user))
|
||||
.map(|user| user.as_str())
|
||||
.collect::<Vec<_>>();
|
||||
let removing = if removing.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
removing.join(", ").red()
|
||||
};
|
||||
lines.push(format!(
|
||||
"{}: 'users'\n{}: {removing}\n{}: {adding}",
|
||||
"field".dimmed(),
|
||||
"removing".dimmed(),
|
||||
"adding".dimmed(),
|
||||
))
|
||||
}
|
||||
if update_permissions {
|
||||
let adding = user_group
|
||||
.permissions
|
||||
.iter()
|
||||
.filter(|permission| {
|
||||
!original_permissions.contains(permission)
|
||||
})
|
||||
.map(|permission| format!("{permission:?}"))
|
||||
.collect::<Vec<_>>();
|
||||
let adding = if adding.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
adding.join(", ").green()
|
||||
};
|
||||
let removing = original_permissions
|
||||
.iter()
|
||||
.filter(|permission| {
|
||||
!user_group.permissions.contains(permission)
|
||||
})
|
||||
.map(|permission| format!("{permission:?}"))
|
||||
.collect::<Vec<_>>();
|
||||
let removing = if removing.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
removing.join(", ").red()
|
||||
};
|
||||
lines.push(format!(
|
||||
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
|
||||
"field".dimmed(),
|
||||
"removing".dimmed(),
|
||||
"adding".dimmed()
|
||||
))
|
||||
}
|
||||
println!("{}", lines.join("\n-------------------\n"));
|
||||
to_update.push(UpdateItem {
|
||||
user_group,
|
||||
update_users,
|
||||
update_permissions,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
for d in &to_delete {
|
||||
println!(
|
||||
"\n{}: user group: '{}'\n-------------------",
|
||||
"DELETE".red(),
|
||||
d.name.bold(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok((to_create, to_update, to_delete))
|
||||
}
|
||||
|
||||
/// order permissions in deterministic way
|
||||
fn sort_permissions(
|
||||
a: &PermissionToml,
|
||||
b: &PermissionToml,
|
||||
) -> Ordering {
|
||||
let (a_t, a_id) = a.target.extract_variant_id();
|
||||
let (b_t, b_id) = b.target.extract_variant_id();
|
||||
match (a_t.cmp(&b_t), a_id.cmp(b_id)) {
|
||||
(Ordering::Greater, _) => Ordering::Greater,
|
||||
(Ordering::Less, _) => Ordering::Less,
|
||||
(_, Ordering::Greater) => Ordering::Greater,
|
||||
(_, Ordering::Less) => Ordering::Less,
|
||||
_ => Ordering::Equal,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_updates(
|
||||
to_create: Vec<UserGroupToml>,
|
||||
to_update: Vec<UpdateItem>,
|
||||
to_delete: Vec<DeleteItem>,
|
||||
) {
|
||||
// Create the non-existant user groups
|
||||
for user_group in to_create {
|
||||
// Create the user group
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(CreateUserGroup {
|
||||
name: user_group.name.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to create user group {} | {e:#}",
|
||||
user_group.name
|
||||
);
|
||||
continue;
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}'",
|
||||
"created".green().bold(),
|
||||
user_group.name.bold(),
|
||||
);
|
||||
};
|
||||
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
run_update_permissions(user_group.name, user_group.permissions)
|
||||
.await;
|
||||
}
|
||||
|
||||
// Update the existing user groups
|
||||
for UpdateItem {
|
||||
user_group,
|
||||
update_users,
|
||||
update_permissions,
|
||||
} in to_update
|
||||
{
|
||||
if update_users {
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
}
|
||||
if update_permissions {
|
||||
run_update_permissions(user_group.name, user_group.permissions)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
for user_group in to_delete {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(DeleteUserGroup { id: user_group.id })
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to delete user group {} | {e:#}",
|
||||
user_group.name
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}'",
|
||||
"deleted".red().bold(),
|
||||
user_group.name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn set_users(user_group: String, users: Vec<String>) {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(SetUsersInUserGroup {
|
||||
user_group: user_group.clone(),
|
||||
users,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to set users in group {user_group} | {e:#}");
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}' users",
|
||||
"updated".blue().bold(),
|
||||
user_group.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_update_permissions(
|
||||
user_group: String,
|
||||
permissions: Vec<PermissionToml>,
|
||||
) {
|
||||
for PermissionToml { target, level } in permissions {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(UpdatePermissionOnTarget {
|
||||
user_target: UserTarget::UserGroup(user_group.clone()),
|
||||
resource_target: target.clone(),
|
||||
permission: level,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to set permssion in group {user_group} | target: {target:?} | {e:#}",
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}' permissions",
|
||||
"updated".blue().bold(),
|
||||
user_group.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,206 +0,0 @@
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateVariable, DeleteVariable, UpdateVariableDescription,
|
||||
UpdateVariableValue,
|
||||
},
|
||||
entities::variable::Variable,
|
||||
};
|
||||
|
||||
use crate::{maps::name_to_variable, state::monitor_client};
|
||||
|
||||
pub struct ToUpdateItem {
|
||||
pub variable: Variable,
|
||||
pub update_value: bool,
|
||||
pub update_description: bool,
|
||||
}
|
||||
|
||||
pub fn get_updates(
|
||||
variables: Vec<Variable>,
|
||||
delete: bool,
|
||||
) -> anyhow::Result<(Vec<Variable>, Vec<ToUpdateItem>, Vec<String>)> {
|
||||
let map = name_to_variable();
|
||||
|
||||
let mut to_create = Vec::<Variable>::new();
|
||||
let mut to_update = Vec::<ToUpdateItem>::new();
|
||||
let mut to_delete = Vec::<String>::new();
|
||||
|
||||
if delete {
|
||||
for variable in map.values() {
|
||||
if !variables.iter().any(|v| v.name == variable.name) {
|
||||
to_delete.push(variable.name.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for variable in variables {
|
||||
match map.get(&variable.name) {
|
||||
Some(original) => {
|
||||
let item = ToUpdateItem {
|
||||
update_value: original.value != variable.value,
|
||||
update_description: original.description
|
||||
!= variable.description,
|
||||
variable,
|
||||
};
|
||||
if !item.update_value && !item.update_description {
|
||||
continue;
|
||||
}
|
||||
println!(
|
||||
"\n{}: variable: '{}'\n-------------------",
|
||||
"UPDATE".blue(),
|
||||
item.variable.name.bold(),
|
||||
);
|
||||
|
||||
let mut lines = Vec::<String>::new();
|
||||
|
||||
if item.update_value {
|
||||
lines.push(format!(
|
||||
"{}: 'value'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
original.value.red(),
|
||||
"to".dimmed(),
|
||||
item.variable.value.green()
|
||||
))
|
||||
}
|
||||
|
||||
if item.update_description {
|
||||
lines.push(format!(
|
||||
"{}: 'description'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
original.description.red(),
|
||||
"to".dimmed(),
|
||||
item.variable.description.green()
|
||||
))
|
||||
}
|
||||
|
||||
println!("{}", lines.join("\n-------------------\n"));
|
||||
|
||||
to_update.push(item);
|
||||
}
|
||||
None => {
|
||||
if variable.description.is_empty() {
|
||||
println!(
|
||||
"\n{}: variable: {}\n{}: {}",
|
||||
"CREATE".green(),
|
||||
variable.name.bold().green(),
|
||||
"value".dimmed(),
|
||||
variable.value,
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"\n{}: variable: {}\n{}: {}\n{}: {}",
|
||||
"CREATE".green(),
|
||||
variable.name.bold().green(),
|
||||
"description".dimmed(),
|
||||
variable.description,
|
||||
"value".dimmed(),
|
||||
variable.value,
|
||||
);
|
||||
}
|
||||
to_create.push(variable)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name in &to_delete {
|
||||
println!(
|
||||
"\n{}: variable: '{}'\n-------------------",
|
||||
"DELETE".red(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok((to_create, to_update, to_delete))
|
||||
}
|
||||
|
||||
pub async fn run_updates(
|
||||
to_create: Vec<Variable>,
|
||||
to_update: Vec<ToUpdateItem>,
|
||||
to_delete: Vec<String>,
|
||||
) {
|
||||
for variable in to_create {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(CreateVariable {
|
||||
name: variable.name.clone(),
|
||||
value: variable.value,
|
||||
description: variable.description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to create variable {} | {e:#}", variable.name);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}'",
|
||||
"created".green().bold(),
|
||||
variable.name.bold(),
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
for ToUpdateItem {
|
||||
variable,
|
||||
update_value,
|
||||
update_description,
|
||||
} in to_update
|
||||
{
|
||||
if update_value {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdateVariableValue {
|
||||
name: variable.name.clone(),
|
||||
value: variable.value,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update variable value for {} | {e:#}",
|
||||
variable.name
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}' value",
|
||||
"updated".blue().bold(),
|
||||
variable.name.bold(),
|
||||
);
|
||||
};
|
||||
}
|
||||
if update_description {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdateVariableDescription {
|
||||
name: variable.name.clone(),
|
||||
description: variable.description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update variable description for {} | {e:#}",
|
||||
variable.name
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}' description",
|
||||
"updated".blue().bold(),
|
||||
variable.name.bold(),
|
||||
);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
for variable in to_delete {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(DeleteVariable {
|
||||
name: variable.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to delete variable {variable} | {e:#}",);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}'",
|
||||
"deleted".red().bold(),
|
||||
variable.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
[package]
|
||||
name = "monitor_core"
|
||||
name = "komodo_core"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
@@ -15,8 +15,9 @@ path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client = { workspace = true, features = ["mongo"] }
|
||||
komodo_client = { workspace = true, features = ["mongo"] }
|
||||
periphery_client.workspace = true
|
||||
environment_file.workspace = true
|
||||
formatting.workspace = true
|
||||
logger.workspace = true
|
||||
git.workspace = true
|
||||
@@ -29,35 +30,34 @@ derive_variants.workspace = true
|
||||
mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
toml_pretty.workspace = true
|
||||
run_command.workspace = true
|
||||
parse_csl.workspace = true
|
||||
mungos.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
axum-server.workspace = true
|
||||
ordered_hash_map.workspace = true
|
||||
openidconnect.workspace = true
|
||||
urlencoding.workspace = true
|
||||
aws-sdk-ec2.workspace = true
|
||||
aws-sdk-ecr.workspace = true
|
||||
aws-config.workspace = true
|
||||
tokio-util.workspace = true
|
||||
axum-extra.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
typeshare.workspace = true
|
||||
octorust.workspace = true
|
||||
dashmap.workspace = true
|
||||
tracing.workspace = true
|
||||
reqwest.workspace = true
|
||||
futures.workspace = true
|
||||
nom_pem.workspace = true
|
||||
anyhow.workspace = true
|
||||
dotenv.workspace = true
|
||||
dotenvy.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
tokio.workspace = true
|
||||
tower.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
regex.workspace = true
|
||||
axum.workspace = true
|
||||
toml.workspace = true
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
# Build Core
|
||||
FROM rust:1.79.0-bookworm as core-builder
|
||||
WORKDIR /builder
|
||||
COPY . .
|
||||
RUN cargo build -p monitor_core --release
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine as frontend-builder
|
||||
WORKDIR /builder
|
||||
COPY ./frontend ./frontend
|
||||
COPY ./client/core/ts ./client
|
||||
RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link @monitor/client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
# Install Deps
|
||||
RUN apt update && apt install -y git curl unzip ca-certificates && \
|
||||
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \
|
||||
unzip awscliv2.zip && \
|
||||
./aws/install
|
||||
|
||||
# Copy
|
||||
COPY ./config_example/core.config.example.toml /config/config.toml
|
||||
COPY --from=core-builder /builder/target/release/core /
|
||||
COPY --from=frontend-builder /builder/frontend/dist /frontend
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9000
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
|
||||
LABEL org.opencontainers.image.description="A tool to build and deploy software across many servers"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
CMD ["./core"]
|
||||
45
bin/core/alpine.Dockerfile
Normal file
45
bin/core/alpine.Dockerfile
Normal file
@@ -0,0 +1,45 @@
|
||||
## This one produces smaller images,
|
||||
## but alpine uses `musl` instead of `glibc`.
|
||||
## This makes it take longer / more resources to build,
|
||||
## and may negatively affect runtime performance.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.81.0-alpine AS core-builder
|
||||
WORKDIR /builder
|
||||
RUN apk update && apk --no-cache add musl-dev openssl-dev openssl-libs-static
|
||||
COPY . .
|
||||
RUN cargo build -p komodo_core --release
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
WORKDIR /builder
|
||||
COPY ./frontend ./frontend
|
||||
COPY ./client/core/ts ./client
|
||||
RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
FROM alpine:3.20
|
||||
|
||||
# Install Deps
|
||||
RUN apk update && apk add --no-cache --virtual .build-deps \
|
||||
openssl ca-certificates git git-lfs
|
||||
|
||||
# Setup an application directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY --from=core-builder /builder/target/release/core /app
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
# Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose.
|
||||
ENTRYPOINT [ "/app/core" ]
|
||||
39
bin/core/debian.Dockerfile
Normal file
39
bin/core/debian.Dockerfile
Normal file
@@ -0,0 +1,39 @@
|
||||
# Build Core
|
||||
FROM rust:1.81.0-bullseye AS core-builder
|
||||
WORKDIR /builder
|
||||
COPY . .
|
||||
RUN cargo build -p komodo_core --release
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
WORKDIR /builder
|
||||
COPY ./frontend ./frontend
|
||||
COPY ./client/core/ts ./client
|
||||
RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link @komodo/client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Setup an application directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY --from=core-builder /builder/target/release/core /app
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/mbecker20/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
|
||||
ENTRYPOINT [ "/app/core" ]
|
||||
169
bin/core/src/alert/discord.rs
Normal file
169
bin/core/src/alert/discord.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} is now *reachable*\n{link}"
|
||||
)
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\n**error**: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | *{name}*{region} is *unreachable* ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | *{name}*{region} cpu usage at *{percentage:.1}%*\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾\n\nUsing *{used_gb:.1} GiB* / *{total_gb:.1} GiB*\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | *{name}*{region} disk usage at *{percentage:.1}%* 💿\nmount point: `{path:?}`\nusing *{used_gb:.1} GiB* / *{total_gb:.1} GiB*\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to = fmt_docker_container_state(to);
|
||||
format!("📦 Deployment *{name}* is now {to}\nserver: {server_name}\nprevious: {from}\n{link}")
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to = fmt_stack_state(to);
|
||||
format!("🥞 Stack *{name}* is now {to}\nserver: {server_name}\nprevious: {from}\n{link}")
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!("{level} | Failed to terminated AWS builder instance\ninstance id: *{instance_id}*\n{message}")
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on *{name}*\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!("{level} | Build *{name}* failed\nversion: v{version}\n{link}")
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for *{name}* failed\n{link}")
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !content.is_empty() {
|
||||
send_message(url, &content).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
url: &str,
|
||||
content: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
let body = DiscordMessageBody { content };
|
||||
|
||||
let response = http_client()
|
||||
.post(url)
|
||||
.json(&body)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send message")?;
|
||||
|
||||
let status = response.status();
|
||||
|
||||
if status.is_success() {
|
||||
Ok(())
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!("Failed to send message to Discord | {status} | failed to get response text")
|
||||
})?;
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to send message to Discord | {status} | {text}"
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
fn http_client() -> &'static reqwest::Client {
|
||||
static CLIENT: OnceLock<reqwest::Client> = OnceLock::new();
|
||||
CLIENT.get_or_init(reqwest::Client::new)
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct DiscordMessageBody<'a> {
|
||||
content: &'a str,
|
||||
}
|
||||
207
bin/core/src/alert/mod.rs
Normal file
207
bin/core/src/alert/mod.rs
Normal file
@@ -0,0 +1,207 @@
|
||||
use ::slack::types::Block;
|
||||
use anyhow::{anyhow, Context};
|
||||
use derive_variants::ExtractVariant;
|
||||
use futures::future::join_all;
|
||||
use komodo_client::entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alerter::*,
|
||||
deployment::DeploymentState,
|
||||
stack::StackState,
|
||||
ResourceTargetVariant,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
mod discord;
|
||||
mod slack;
|
||||
|
||||
#[instrument]
|
||||
pub async fn send_alerts(alerts: &[Alert]) {
|
||||
if alerts.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"ERROR sending alerts | failed to get alerters from db | {e:#}"
|
||||
)
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let alert_type = alert.data.extract_variant();
|
||||
|
||||
let handles = alerters.iter().map(|alerter| async {
|
||||
// Don't send if not enabled
|
||||
if !alerter.config.enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Don't send if alert type not configured on the alerter
|
||||
if !alerter.config.alert_types.is_empty()
|
||||
&& !alerter.config.alert_types.contains(&alert_type)
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Don't send if resource is in the blacklist
|
||||
if alerter.config.except_resources.contains(&alert.target) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Don't send if whitelist configured and target is not included
|
||||
if !alerter.config.resources.is_empty()
|
||||
&& !alerter.config.resources.contains(&alert.target)
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match &alerter.config.endpoint {
|
||||
AlerterEndpoint::Custom(CustomAlerterEndpoint { url }) => {
|
||||
send_custom_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"failed to send alert to custom alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Slack(SlackAlerterEndpoint { url }) => {
|
||||
slack::send_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"failed to send alert to slack alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Discord(DiscordAlerterEndpoint { url }) => {
|
||||
discord::send_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"failed to send alert to Discord alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
join_all(handles)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|res| res.err())
|
||||
.for_each(|e| error!("{e:#}"));
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_custom_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let res = reqwest::Client::new()
|
||||
.post(url)
|
||||
.json(alert)
|
||||
.send()
|
||||
.await
|
||||
.context("failed at post request to alerter")?;
|
||||
let status = res.status();
|
||||
if !status.is_success() {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response text on alerter response")?;
|
||||
return Err(anyhow!(
|
||||
"post to alerter failed | {status} | {text}"
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fmt_region(region: &Option<String>) -> String {
|
||||
match region {
|
||||
Some(region) => format!(" ({region})"),
|
||||
None => String::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fmt_docker_container_state(state: &DeploymentState) -> String {
|
||||
match state {
|
||||
DeploymentState::Running => String::from("Running ▶️"),
|
||||
DeploymentState::Exited => String::from("Exited 🛑"),
|
||||
DeploymentState::Restarting => String::from("Restarting 🔄"),
|
||||
DeploymentState::NotDeployed => String::from("Not Deployed"),
|
||||
_ => state.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fmt_stack_state(state: &StackState) -> String {
|
||||
match state {
|
||||
StackState::Running => String::from("Running ▶️"),
|
||||
StackState::Stopped => String::from("Stopped 🛑"),
|
||||
StackState::Restarting => String::from("Restarting 🔄"),
|
||||
StackState::Down => String::from("Down ⬇️"),
|
||||
_ => state.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fmt_level(level: SeverityLevel) -> &'static str {
|
||||
match level {
|
||||
SeverityLevel::Critical => "CRITICAL 🚨",
|
||||
SeverityLevel::Warning => "WARNING ‼️",
|
||||
SeverityLevel::Ok => "OK ✅",
|
||||
}
|
||||
}
|
||||
|
||||
fn resource_link(
|
||||
resource_type: ResourceTargetVariant,
|
||||
id: &str,
|
||||
) -> String {
|
||||
let path = match resource_type {
|
||||
ResourceTargetVariant::System => unreachable!(),
|
||||
ResourceTargetVariant::Build => format!("/builds/{id}"),
|
||||
ResourceTargetVariant::Builder => {
|
||||
format!("/builders/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Deployment => {
|
||||
format!("/deployments/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Stack => {
|
||||
format!("/stacks/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Server => {
|
||||
format!("/servers/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Repo => format!("/repos/{id}"),
|
||||
ResourceTargetVariant::Alerter => {
|
||||
format!("/alerters/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Procedure => {
|
||||
format!("/procedures/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ServerTemplate => {
|
||||
format!("/server-templates/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
format!("/resource-syncs/{id}")
|
||||
}
|
||||
};
|
||||
|
||||
format!("{}{path}", core_config().host)
|
||||
}
|
||||
@@ -1,130 +1,7 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use derive_variants::ExtractVariant;
|
||||
use futures::future::join_all;
|
||||
use monitor_client::entities::{
|
||||
alert::{Alert, AlertData},
|
||||
alerter::*,
|
||||
deployment::DeploymentState,
|
||||
server::stats::SeverityLevel,
|
||||
update::ResourceTargetVariant,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use slack::types::Block;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
#[instrument]
|
||||
pub async fn send_alerts(alerts: &[Alert]) {
|
||||
if alerts.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().await.alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"ERROR sending alerts | failed to get alerters from db | {e:#}"
|
||||
)
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let alert_type = alert.data.extract_variant();
|
||||
|
||||
let handles = alerters.iter().map(|alerter| async {
|
||||
// Don't send if not enabled
|
||||
if !alerter.config.enabled {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Don't send if alert type not configured on the alerter
|
||||
if !alerter.config.alert_types.is_empty()
|
||||
&& !alerter.config.alert_types.contains(&alert_type)
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Don't send if resource is in the blacklist
|
||||
if alerter.config.except_resources.contains(&alert.target) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Don't send if whitelist configured and target is not included
|
||||
if !alerter.config.resources.is_empty()
|
||||
&& !alerter.config.resources.contains(&alert.target)
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match &alerter.config.endpoint {
|
||||
AlerterEndpoint::Slack(SlackAlerterEndpoint { url }) => {
|
||||
send_slack_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"failed to send alert to slack alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Custom(CustomAlerterEndpoint { url }) => {
|
||||
send_custom_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"failed to send alert to custom alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
join_all(handles)
|
||||
.await
|
||||
.into_iter()
|
||||
.filter_map(|res| res.err())
|
||||
.for_each(|e| error!("{e:#}"));
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_custom_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let res = reqwest::Client::new()
|
||||
.post(url)
|
||||
.json(alert)
|
||||
.send()
|
||||
.await
|
||||
.context("failed at post request to alerter")?;
|
||||
let status = res.status();
|
||||
if !status.is_success() {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response text on alerter response")?;
|
||||
return Err(anyhow!(
|
||||
"post to alerter failed | {status} | {text}"
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_slack_alert(
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
@@ -305,7 +182,7 @@ async fn send_slack_alert(
|
||||
..
|
||||
} => {
|
||||
let to = fmt_docker_container_state(to);
|
||||
let text = format!("📦 container *{name}* is now {to}");
|
||||
let text = format!("📦 Container *{name}* is now {to}");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!(
|
||||
@@ -318,6 +195,28 @@ async fn send_slack_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
name,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
id,
|
||||
..
|
||||
} => {
|
||||
let to = fmt_stack_state(to);
|
||||
let text = format!("🥞 Stack *{name}* is now {to}");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!(
|
||||
"server: {server_name}\nprevious: {from}",
|
||||
)),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Stack,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
@@ -359,70 +258,26 @@ async fn send_slack_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let text =
|
||||
format!("{level} | Repo build for {name} has failed");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!(
|
||||
"repo id: *{id}*\nrepo name: *{name}*",
|
||||
)),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Repo,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !text.is_empty() {
|
||||
let slack = slack::Client::new(url);
|
||||
let slack = ::slack::Client::new(url);
|
||||
slack.send_message(text, blocks).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fmt_region(region: &Option<String>) -> String {
|
||||
match region {
|
||||
Some(region) => format!(" ({region})"),
|
||||
None => String::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fmt_docker_container_state(state: &DeploymentState) -> String {
|
||||
match state {
|
||||
DeploymentState::Running => String::from("Running ▶️"),
|
||||
DeploymentState::Exited => String::from("Exited 🛑"),
|
||||
DeploymentState::Restarting => String::from("Restarting 🔄"),
|
||||
DeploymentState::NotDeployed => String::from("Not Deployed"),
|
||||
_ => state.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn fmt_level(level: SeverityLevel) -> &'static str {
|
||||
match level {
|
||||
SeverityLevel::Critical => "CRITICAL 🚨",
|
||||
SeverityLevel::Warning => "WARNING ‼️",
|
||||
SeverityLevel::Ok => "OK ✅",
|
||||
}
|
||||
}
|
||||
|
||||
fn resource_link(
|
||||
resource_type: ResourceTargetVariant,
|
||||
id: &str,
|
||||
) -> String {
|
||||
let path = match resource_type {
|
||||
ResourceTargetVariant::System => unreachable!(),
|
||||
ResourceTargetVariant::Build => format!("/builds/{id}"),
|
||||
ResourceTargetVariant::Builder => {
|
||||
format!("/builders/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Deployment => {
|
||||
format!("/deployments/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Server => {
|
||||
format!("/servers/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Repo => format!("/repos/{id}"),
|
||||
ResourceTargetVariant::Alerter => {
|
||||
format!("/alerters/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Procedure => {
|
||||
format!("/procedures/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ServerTemplate => {
|
||||
format!("/server-templates/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
format!("/resource-syncs/{id}")
|
||||
}
|
||||
};
|
||||
|
||||
format!("{}{path}", core_config().host)
|
||||
}
|
||||
@@ -3,10 +3,11 @@ use std::{sync::OnceLock, time::Instant};
|
||||
use anyhow::anyhow;
|
||||
use axum::{http::HeaderMap, routing::post, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::auth::*, entities::user::User};
|
||||
use komodo_client::{api::auth::*, entities::user::User};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use serror::{AddStatusCode, Json};
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -15,6 +16,7 @@ use crate::{
|
||||
get_user_id_from_headers,
|
||||
github::{self, client::github_oauth_client},
|
||||
google::{self, client::google_oauth_client},
|
||||
oidc,
|
||||
},
|
||||
config::core_config,
|
||||
helpers::query::get_user,
|
||||
@@ -38,14 +40,25 @@ pub enum AuthRequest {
|
||||
pub fn router() -> Router {
|
||||
let mut router = Router::new().route("/", post(handler));
|
||||
|
||||
if core_config().local_auth {
|
||||
info!("🔑 Local Login Enabled");
|
||||
}
|
||||
|
||||
if github_oauth_client().is_some() {
|
||||
info!("🔑 Github Login Enabled");
|
||||
router = router.nest("/github", github::router())
|
||||
}
|
||||
|
||||
if google_oauth_client().is_some() {
|
||||
info!("🔑 Github Login Enabled");
|
||||
router = router.nest("/google", google::router())
|
||||
}
|
||||
|
||||
if core_config().oidc_enabled {
|
||||
info!("🔑 OIDC Login Enabled");
|
||||
router = router.nest("/oidc", oidc::router())
|
||||
}
|
||||
|
||||
router
|
||||
}
|
||||
|
||||
@@ -70,7 +83,10 @@ async fn handler(
|
||||
}
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/auth request {req_id} | resolve time: {elapsed:?}");
|
||||
Ok((TypedHeader(ContentType::json()), res?))
|
||||
Ok((
|
||||
TypedHeader(ContentType::json()),
|
||||
res.status_code(StatusCode::UNAUTHORIZED)?,
|
||||
))
|
||||
}
|
||||
|
||||
fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
@@ -87,6 +103,11 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
google: config.google_oauth.enabled
|
||||
&& !config.google_oauth.id.is_empty()
|
||||
&& !config.google_oauth.secret.is_empty(),
|
||||
oidc: config.oidc_enabled
|
||||
&& !config.oidc_provider.is_empty()
|
||||
&& !config.oidc_client_id.is_empty()
|
||||
&& !config.oidc_client_secret.is_empty(),
|
||||
registration_disabled: config.disable_user_registration,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,24 +1,18 @@
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::{format_serror, muted};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
api::execute::{
|
||||
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
|
||||
},
|
||||
use komodo_client::{
|
||||
api::execute::{CancelBuild, Deploy, RunBuild},
|
||||
entities::{
|
||||
alert::{Alert, AlertData},
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
all_logs_success,
|
||||
build::{Build, CloudRegistryConfig, ImageRegistry},
|
||||
builder::{AwsBuilderConfig, Builder, BuilderConfig},
|
||||
config::core::{AwsEcrConfig, AwsEcrConfigWithCredentials},
|
||||
build::{Build, BuildConfig, ImageRegistryConfig},
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
monitor_timestamp,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::{stats::SeverityLevel, Server},
|
||||
server_template::aws::AwsServerTemplateConfig,
|
||||
to_monitor_name,
|
||||
update::{Log, Update},
|
||||
user::{auto_redeploy_user, User},
|
||||
},
|
||||
@@ -31,38 +25,30 @@ use mungos::{
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::{
|
||||
api::{self, GetVersionResponse},
|
||||
PeripheryClient,
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{
|
||||
cloud::{
|
||||
aws::{
|
||||
ec2::{
|
||||
launch_ec2_instance, terminate_ec2_instance_with_retry,
|
||||
Ec2Instance,
|
||||
},
|
||||
ecr,
|
||||
},
|
||||
BuildCleanupData,
|
||||
},
|
||||
config::core_config,
|
||||
alert::send_alerts,
|
||||
helpers::{
|
||||
alert::send_alerts,
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
channel::build_cancel_channel,
|
||||
periphery_client,
|
||||
query::{get_deployment_state, get_global_variables},
|
||||
update::update_update,
|
||||
git_token,
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
interpolate_variables_secrets_into_system_command,
|
||||
},
|
||||
query::{get_deployment_state, get_variables_and_secrets},
|
||||
registry_token,
|
||||
update::{init_execution_update, update_update},
|
||||
},
|
||||
resource::{self, refresh_build_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
use crate::helpers::update::init_execution_update;
|
||||
|
||||
use super::ExecuteRequest;
|
||||
|
||||
impl Resolve<RunBuild, (User, Update)> for State {
|
||||
@@ -78,9 +64,11 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
let (registry_token, aws_ecr) =
|
||||
validate_account_extract_registry_token_aws_ecr(&build).await?;
|
||||
if build.config.builder_id.is_empty() {
|
||||
return Err(anyhow!("Must attach builder to RunBuild"));
|
||||
}
|
||||
|
||||
// get the action state for the build (or insert default).
|
||||
let action_state =
|
||||
@@ -91,16 +79,37 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.building = true)?;
|
||||
|
||||
build.config.version.increment();
|
||||
if build.config.auto_increment_version {
|
||||
build.config.version.increment();
|
||||
}
|
||||
update.version = build.config.version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token = git_token(
|
||||
&build.config.git_provider,
|
||||
&build.config.git_account,
|
||||
|https| build.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account),
|
||||
)?;
|
||||
|
||||
let registry_token =
|
||||
validate_account_extract_registry_token(&build).await?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
let mut cancel_recv =
|
||||
build_cancel_channel().receiver.resubscribe();
|
||||
let build_id = build.id.clone();
|
||||
|
||||
let builder =
|
||||
resource::get::<Builder>(&build.config.builder_id).await?;
|
||||
|
||||
let is_server_builder =
|
||||
matches!(&builder.config, BuilderConfig::Server(_));
|
||||
|
||||
tokio::spawn(async move {
|
||||
let poll = async {
|
||||
loop {
|
||||
@@ -109,16 +118,19 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
id = cancel_recv.recv() => id?
|
||||
};
|
||||
if incoming_build_id == build_id {
|
||||
update.push_simple_log(
|
||||
"cancel acknowledged",
|
||||
"the build cancellation has been queued, it may still take some time",
|
||||
);
|
||||
if is_server_builder {
|
||||
update.push_error_log("Cancel acknowledged", "Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature.");
|
||||
} else {
|
||||
update.push_simple_log("Cancel acknowledged", "The build cancellation has been queued, it may still take some time.");
|
||||
}
|
||||
update.finalize();
|
||||
let id = update.id.clone();
|
||||
if let Err(e) = update_update(update).await {
|
||||
warn!("failed to update Update {id} | {e:#}");
|
||||
warn!("failed to modify Update {id} on db | {e:#}");
|
||||
}
|
||||
if !is_server_builder {
|
||||
cancel_clone.cancel();
|
||||
}
|
||||
cancel_clone.cancel();
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
@@ -133,43 +145,68 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) =
|
||||
match get_build_builder(&build, &mut update).await {
|
||||
Ok(builder) => {
|
||||
info!("got builder for build");
|
||||
builder
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed to get builder | {e:#}");
|
||||
update.logs.push(Log::error(
|
||||
"get builder",
|
||||
format_serror(&e.context("failed to get builder").into()),
|
||||
));
|
||||
return handle_early_return(
|
||||
update, build.id, build.name, false,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
};
|
||||
|
||||
let core_config = core_config();
|
||||
let variables = get_global_variables().await?;
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
build.name.clone(),
|
||||
Some(build.config.version),
|
||||
builder,
|
||||
&mut update,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(builder) => builder,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"failed to get builder for build {} | {e:#}",
|
||||
build.name
|
||||
);
|
||||
update.logs.push(Log::error(
|
||||
"get builder",
|
||||
format_serror(&e.context("failed to get builder").into()),
|
||||
));
|
||||
return handle_early_return(
|
||||
update, build.id, build.name, false,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
};
|
||||
|
||||
// CLONE REPO
|
||||
|
||||
let github_token = core_config
|
||||
.github_accounts
|
||||
.get(&build.config.github_account)
|
||||
.cloned();
|
||||
let secret_replacers = if !build.config.skip_secret_interp {
|
||||
// Interpolate variables / secrets into pre build command
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.pre_build,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::git::CloneRepo {
|
||||
args: (&build).into(),
|
||||
github_token,
|
||||
git_token,
|
||||
environment: Default::default(),
|
||||
env_file_path: Default::default(),
|
||||
skip_secret_interp: Default::default(),
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during clone, cleaning up builder");
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
@@ -178,10 +215,13 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
},
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(clone_logs) => {
|
||||
info!("finished repo clone");
|
||||
update.logs.extend(clone_logs);
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
update.logs.extend(res.logs);
|
||||
update.commit_hash =
|
||||
res.commit_hash.unwrap_or_default().to_string();
|
||||
res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed build at clone repo | {e:#}");
|
||||
@@ -189,99 +229,62 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if all_logs_success(&update.logs) {
|
||||
// Interpolate variables / secrets into build args
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut secret_replacers_for_log = HashSet::new();
|
||||
let secret_replacers = if !build.config.skip_secret_interp {
|
||||
// Interpolate variables / secrets into build args
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
// Interpolate into build args
|
||||
for arg in &mut build.config.build_args {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&arg.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers_for_log.extend(
|
||||
more_replacers.iter().map(|(_, variable)| variable.clone()),
|
||||
);
|
||||
secret_replacers.extend(more_replacers);
|
||||
arg.value = res;
|
||||
}
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.build_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
// Interpolate into secret args
|
||||
for arg in &mut build.config.secret_args {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&arg.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers_for_log.extend(
|
||||
more_replacers.into_iter().map(|(_, variable)| variable),
|
||||
);
|
||||
// Secret args don't need to be in replacers sent to periphery.
|
||||
// The secret args don't end up in the command like build args do.
|
||||
arg.value = res;
|
||||
}
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.secret_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
// Show which variables were interpolated
|
||||
if !global_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate global variables",
|
||||
global_replacers
|
||||
.into_iter()
|
||||
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
}
|
||||
if !secret_replacers_for_log.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate core secrets",
|
||||
secret_replacers_for_log
|
||||
.into_iter()
|
||||
.map(|variable| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
registry_token,
|
||||
aws_ecr,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
// Push a commit hash tagged image
|
||||
additional_tags: if update.commit_hash.is_empty() {
|
||||
Default::default()
|
||||
} else {
|
||||
vec![update.commit_hash.clone()]
|
||||
},
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
@@ -294,7 +297,7 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
|
||||
match res {
|
||||
Ok(logs) => {
|
||||
info!("finished build");
|
||||
debug!("finished build");
|
||||
update.logs.extend(logs);
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -309,20 +312,20 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
|
||||
update.finalize();
|
||||
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
if update.success {
|
||||
let _ = db
|
||||
.builds
|
||||
.update_one(
|
||||
doc! { "name": &build.name },
|
||||
doc! {
|
||||
"$set": {
|
||||
"config.version": to_bson(&build.config.version)
|
||||
.context("failed at converting version to bson")?,
|
||||
"info.last_built_at": monitor_timestamp(),
|
||||
}
|
||||
},
|
||||
doc! { "$set": {
|
||||
"config.version": to_bson(&build.config.version)
|
||||
.context("failed at converting version to bson")?,
|
||||
"info.last_built_at": komodo_timestamp(),
|
||||
"info.built_hash": &update.commit_hash,
|
||||
"info.built_message": commit_message
|
||||
}},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
@@ -363,8 +366,8 @@ impl Resolve<RunBuild, (User, Update)> for State {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
target,
|
||||
ts: monitor_timestamp(),
|
||||
resolved_ts: Some(monitor_timestamp()),
|
||||
ts: komodo_timestamp(),
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::BuildFailed {
|
||||
@@ -395,7 +398,7 @@ async fn handle_early_return(
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
@@ -412,8 +415,8 @@ async fn handle_early_return(
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
target,
|
||||
ts: monitor_timestamp(),
|
||||
resolved_ts: Some(monitor_timestamp()),
|
||||
ts: komodo_timestamp(),
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::BuildFailed {
|
||||
@@ -435,7 +438,7 @@ pub async fn validate_cancel_build(
|
||||
if let ExecuteRequest::CancelBuild(req) = request {
|
||||
let build = resource::get::<Build>(&req.build).await?;
|
||||
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
let (latest_build, latest_cancel) = tokio::try_join!(
|
||||
db.updates
|
||||
@@ -481,7 +484,7 @@ impl Resolve<CancelBuild, (User, Update)> for State {
|
||||
&self,
|
||||
CancelBuild { build }: CancelBuild,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<CancelBuildResponse> {
|
||||
) -> anyhow::Result<Update> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
&user,
|
||||
@@ -506,187 +509,37 @@ impl Resolve<CancelBuild, (User, Update)> for State {
|
||||
);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let update_id = update.id.clone();
|
||||
|
||||
build_cancel_channel()
|
||||
.sender
|
||||
.lock()
|
||||
.await
|
||||
.send((build.id, update))?;
|
||||
.send((build.id, update.clone()))?;
|
||||
|
||||
// Make sure cancel is set to complete after some time in case
|
||||
// no reciever is there to do it. Prevents update stuck in InProgress.
|
||||
let update_id = update.id.clone();
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_secs(60)).await;
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&db_client().updates,
|
||||
&update_id,
|
||||
doc! { "$set": { "status": "Complete" } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!("failed to set BuildCancel Update status Complete after timeout | {e:#}")
|
||||
warn!("failed to set CancelBuild Update status Complete after timeout | {e:#}")
|
||||
}
|
||||
});
|
||||
|
||||
Ok(CancelBuildResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
const BUILDER_POLL_RATE_SECS: u64 = 2;
|
||||
const BUILDER_POLL_MAX_TRIES: usize = 30;
|
||||
|
||||
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
|
||||
async fn get_build_builder(
|
||||
build: &Build,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
|
||||
if build.config.builder_id.is_empty() {
|
||||
return Err(anyhow!("build has not configured a builder"));
|
||||
}
|
||||
let builder =
|
||||
resource::get::<Builder>(&build.config.builder_id).await?;
|
||||
match builder.config {
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
return Err(anyhow!("builder has not configured a server"));
|
||||
}
|
||||
let server = resource::get::<Server>(&config.server_id).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
Ok((
|
||||
periphery,
|
||||
BuildCleanupData::Server {
|
||||
repo_name: build.name.clone(),
|
||||
},
|
||||
))
|
||||
}
|
||||
BuilderConfig::Aws(config) => {
|
||||
get_aws_builder(build, config, update).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
|
||||
async fn get_aws_builder(
|
||||
build: &Build,
|
||||
config: AwsBuilderConfig,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
|
||||
let start_create_ts = monitor_timestamp();
|
||||
|
||||
let instance_name =
|
||||
format!("BUILDER-{}-v{}", build.name, build.config.version);
|
||||
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
|
||||
&instance_name,
|
||||
AwsServerTemplateConfig::from_builder_config(&config),
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!("ec2 instance launched");
|
||||
|
||||
let log = Log {
|
||||
stage: "start build instance".to_string(),
|
||||
success: true,
|
||||
stdout: start_aws_builder_log(&instance_id, &ip, &config),
|
||||
start_ts: start_create_ts,
|
||||
end_ts: monitor_timestamp(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery_address = format!("http://{ip}:{}", config.port);
|
||||
let periphery =
|
||||
PeripheryClient::new(&periphery_address, &core_config().passkey);
|
||||
|
||||
let start_connect_ts = monitor_timestamp();
|
||||
let mut res = Ok(GetVersionResponse {
|
||||
version: String::new(),
|
||||
});
|
||||
for _ in 0..BUILDER_POLL_MAX_TRIES {
|
||||
let version = periphery
|
||||
.request(api::GetVersion {})
|
||||
.await
|
||||
.context("failed to reach periphery client on builder");
|
||||
if let Ok(GetVersionResponse { version }) = &version {
|
||||
let connect_log = Log {
|
||||
stage: "build instance connected".to_string(),
|
||||
success: true,
|
||||
stdout: format!(
|
||||
"established contact with periphery on builder\nperiphery version: v{}",
|
||||
version
|
||||
),
|
||||
start_ts: start_connect_ts,
|
||||
end_ts: monitor_timestamp(),
|
||||
..Default::default()
|
||||
};
|
||||
update.logs.push(connect_log);
|
||||
update_update(update.clone()).await?;
|
||||
return Ok((
|
||||
periphery,
|
||||
BuildCleanupData::Aws {
|
||||
instance_id,
|
||||
region: config.region,
|
||||
},
|
||||
));
|
||||
}
|
||||
res = version;
|
||||
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
|
||||
.await;
|
||||
}
|
||||
|
||||
// Spawn terminate task in failure case (if loop is passed without return)
|
||||
tokio::spawn(async move {
|
||||
let _ =
|
||||
terminate_ec2_instance_with_retry(config.region, &instance_id)
|
||||
.await;
|
||||
});
|
||||
|
||||
// Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
|
||||
Err(
|
||||
res.err().unwrap().context(
|
||||
"failed to start usable builder. terminating instance.",
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(skip(periphery, update))]
|
||||
async fn cleanup_builder_instance(
|
||||
periphery: PeripheryClient,
|
||||
cleanup_data: BuildCleanupData,
|
||||
update: &mut Update,
|
||||
) {
|
||||
match cleanup_data {
|
||||
BuildCleanupData::Server { repo_name } => {
|
||||
let _ = periphery
|
||||
.request(api::git::DeleteRepo { name: repo_name })
|
||||
.await;
|
||||
}
|
||||
BuildCleanupData::Aws {
|
||||
instance_id,
|
||||
region,
|
||||
} => {
|
||||
let _instance_id = instance_id.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ =
|
||||
terminate_ec2_instance_with_retry(region, &_instance_id)
|
||||
.await;
|
||||
});
|
||||
update.push_simple_log(
|
||||
"terminate instance",
|
||||
format!("termination queued for instance id {instance_id}"),
|
||||
);
|
||||
}
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let Ok(redeploy_deployments) = find_collect(
|
||||
&db_client().await.deployments,
|
||||
&db_client().deployments,
|
||||
doc! {
|
||||
"config.image.params.build_id": build_id,
|
||||
"config.redeploy_on_build": true
|
||||
@@ -741,96 +594,34 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
}
|
||||
}
|
||||
|
||||
fn start_aws_builder_log(
|
||||
instance_id: &str,
|
||||
ip: &str,
|
||||
config: &AwsBuilderConfig,
|
||||
) -> String {
|
||||
let AwsBuilderConfig {
|
||||
ami_id,
|
||||
instance_type,
|
||||
volume_gb,
|
||||
subnet_id,
|
||||
assign_public_ip,
|
||||
security_group_ids,
|
||||
use_public_ip,
|
||||
..
|
||||
} = config;
|
||||
|
||||
let readable_sec_group_ids = security_group_ids.join(", ");
|
||||
|
||||
[
|
||||
format!("{}: {instance_id}", muted("instance id")),
|
||||
format!("{}: {ip}", muted("ip")),
|
||||
format!("{}: {ami_id}", muted("ami id")),
|
||||
format!("{}: {instance_type}", muted("instance type")),
|
||||
format!("{}: {volume_gb} GB", muted("volume size")),
|
||||
format!("{}: {subnet_id}", muted("subnet id")),
|
||||
format!("{}: {readable_sec_group_ids}", muted("security groups")),
|
||||
format!("{}: {assign_public_ip}", muted("assign public ip")),
|
||||
format!("{}: {use_public_ip}", muted("use public ip")),
|
||||
]
|
||||
.join("\n")
|
||||
}
|
||||
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token / aws ecr config matching requirements.
|
||||
/// and will check the core config for a token matching requirements.
|
||||
/// Otherwise it is left to periphery.
|
||||
async fn validate_account_extract_registry_token_aws_ecr(
|
||||
build: &Build,
|
||||
) -> anyhow::Result<(Option<String>, Option<AwsEcrConfig>)> {
|
||||
match &build.config.image_registry {
|
||||
ImageRegistry::None(_) => Ok((None, None)),
|
||||
ImageRegistry::DockerHub(CloudRegistryConfig {
|
||||
account, ..
|
||||
}) => {
|
||||
if account.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Must attach account to use DockerHub image registry"
|
||||
));
|
||||
}
|
||||
Ok((core_config().docker_accounts.get(account).cloned(), None))
|
||||
}
|
||||
ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
|
||||
if account.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Must attach account to use GithubContainerRegistry"
|
||||
));
|
||||
}
|
||||
Ok((core_config().github_accounts.get(account).cloned(), None))
|
||||
}
|
||||
ImageRegistry::AwsEcr(label) => {
|
||||
let config = core_config().aws_ecr_registries.get(label);
|
||||
let token = match config {
|
||||
Some(AwsEcrConfigWithCredentials {
|
||||
region,
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
..
|
||||
}) => {
|
||||
let token = ecr::get_ecr_token(
|
||||
region,
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
)
|
||||
.await
|
||||
.context("failed to get aws ecr token")?;
|
||||
ecr::maybe_create_repo(
|
||||
&to_monitor_name(&build.name),
|
||||
region.to_string(),
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
)
|
||||
.await
|
||||
.context("failed to create aws ecr repo")?;
|
||||
Some(token)
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
Ok((token, config.map(AwsEcrConfig::from)))
|
||||
}
|
||||
ImageRegistry::Custom(_) => {
|
||||
Err(anyhow!("Custom image registry is not implemented"))
|
||||
}
|
||||
async fn validate_account_extract_registry_token(
|
||||
Build {
|
||||
config:
|
||||
BuildConfig {
|
||||
image_registry:
|
||||
ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
},
|
||||
..
|
||||
},
|
||||
..
|
||||
}: &Build,
|
||||
) -> anyhow::Result<Option<String>> {
|
||||
if domain.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
));
|
||||
}
|
||||
|
||||
let registry_token = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?;
|
||||
|
||||
Ok(registry_token)
|
||||
}
|
||||
|
||||
@@ -2,39 +2,61 @@ use std::collections::HashSet;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
build::{Build, ImageRegistry},
|
||||
config::core::AwsEcrConfig,
|
||||
deployment::{Deployment, DeploymentImage},
|
||||
build::{Build, ImageRegistryConfig},
|
||||
deployment::{
|
||||
extract_registry_domain, Deployment, DeploymentImage,
|
||||
},
|
||||
get_image_name,
|
||||
permission::PermissionLevel,
|
||||
server::ServerState,
|
||||
server::Server,
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
Version,
|
||||
},
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
cloud::aws::ecr,
|
||||
config::core_config,
|
||||
helpers::{
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
},
|
||||
periphery_client,
|
||||
query::{get_global_variables, get_server_with_status},
|
||||
query::get_variables_and_secrets,
|
||||
registry_token,
|
||||
update::update_update,
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
state::{action_states, State},
|
||||
};
|
||||
|
||||
use crate::helpers::update::init_execution_update;
|
||||
async fn setup_deployment_execution(
|
||||
deployment: &str,
|
||||
user: &User,
|
||||
) -> anyhow::Result<(Deployment, Server)> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
deployment,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
|
||||
Ok((deployment, server))
|
||||
}
|
||||
|
||||
impl Resolve<Deploy, (User, Update)> for State {
|
||||
#[instrument(name = "Deploy", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
@@ -47,17 +69,8 @@ impl Resolve<Deploy, (User, Update)> for State {
|
||||
}: Deploy,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let mut deployment =
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
let (mut deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
@@ -70,152 +83,146 @@ impl Resolve<Deploy, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.deploying = true)?;
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
// This block gets the version of the image to deploy in the Build case.
|
||||
// It also gets the name of the image from the build and attaches it directly.
|
||||
let version = match deployment.config.image {
|
||||
periphery
|
||||
.health_check()
|
||||
.await
|
||||
.context("Failed server health check, stopping run.")?;
|
||||
|
||||
// This block resolves the attached Build to an actual versioned image
|
||||
let (version, registry_token) = match &deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
let image_name = get_image_name(&build, |label| {
|
||||
core_config()
|
||||
.aws_ecr_registries
|
||||
.get(label)
|
||||
.map(AwsEcrConfig::from)
|
||||
})
|
||||
.context("failed to create image name")?;
|
||||
let build = resource::get::<Build>(build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version
|
||||
} else {
|
||||
version
|
||||
*version
|
||||
};
|
||||
// Remove ending patch if it is 0, this means use latest patch.
|
||||
let version_str = if version.patch == 0 {
|
||||
format!("{}.{}", version.major, version.minor)
|
||||
} else {
|
||||
version.to_string()
|
||||
};
|
||||
// Potentially add the build image_tag postfix
|
||||
let version_str = if build.config.image_tag.is_empty() {
|
||||
version_str
|
||||
} else {
|
||||
format!("{version_str}-{}", build.config.image_tag)
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version}"),
|
||||
image: format!("{image_name}:{version_str}"),
|
||||
};
|
||||
// set image registry to match build docker account if it's not overridden by deployment
|
||||
if matches!(
|
||||
&deployment.config.image_registry,
|
||||
ImageRegistry::None(_)
|
||||
) {
|
||||
deployment.config.image_registry =
|
||||
build.config.image_registry;
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
(version, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
deployment.config.image_registry_account = account
|
||||
}
|
||||
let token = if !deployment
|
||||
.config
|
||||
.image_registry_account
|
||||
.is_empty()
|
||||
{
|
||||
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(version, token)
|
||||
}
|
||||
version
|
||||
}
|
||||
DeploymentImage::Image { .. } => Version::default(),
|
||||
DeploymentImage::Image { image } => {
|
||||
let domain = extract_registry_domain(image)?;
|
||||
let token = if !deployment
|
||||
.config
|
||||
.image_registry_account
|
||||
.is_empty()
|
||||
{
|
||||
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(Version::default(), token)
|
||||
}
|
||||
};
|
||||
|
||||
let variables = get_global_variables().await?;
|
||||
let core_config = core_config();
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers = if !deployment.config.skip_secret_interp {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
// Interpolate variables into environment
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
for env in &mut deployment.config.environment {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&env.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers.extend(more_replacers);
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
// set env value with the result
|
||||
env.value = res;
|
||||
}
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
// Show which variables were interpolated
|
||||
if !global_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate global variables",
|
||||
global_replacers
|
||||
.into_iter()
|
||||
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.ports,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.volumes,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.command,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
}
|
||||
if !secret_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate core secrets",
|
||||
secret_replacers
|
||||
.iter()
|
||||
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
update.version = version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let (registry_token, aws_ecr) = match &deployment
|
||||
.config
|
||||
.image_registry
|
||||
{
|
||||
ImageRegistry::None(_) => (None, None),
|
||||
ImageRegistry::DockerHub(params) => (
|
||||
core_config.docker_accounts.get(¶ms.account).cloned(),
|
||||
None,
|
||||
),
|
||||
ImageRegistry::Ghcr(params) => (
|
||||
core_config.github_accounts.get(¶ms.account).cloned(),
|
||||
None,
|
||||
),
|
||||
ImageRegistry::AwsEcr(label) => {
|
||||
let config = core_config
|
||||
.aws_ecr_registries
|
||||
.get(label)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"did not find config for aws ecr registry {label}"
|
||||
)
|
||||
})?;
|
||||
(
|
||||
Some(
|
||||
ecr::get_ecr_token(
|
||||
&config.region,
|
||||
&config.access_key_id,
|
||||
&config.secret_access_key,
|
||||
)
|
||||
.await
|
||||
.context("failed to create aws ecr login token")?,
|
||||
),
|
||||
Some(AwsEcrConfig::from(config)),
|
||||
)
|
||||
}
|
||||
ImageRegistry::Custom(_) => {
|
||||
return Err(anyhow!("Custom ImageRegistry not yet supported"))
|
||||
}
|
||||
};
|
||||
|
||||
match periphery
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal,
|
||||
stop_time,
|
||||
registry_token,
|
||||
aws_ecr,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
@@ -240,19 +247,15 @@ impl Resolve<Deploy, (User, Update)> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StartContainer, (User, Update)> for State {
|
||||
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
impl Resolve<StartDeployment, (User, Update)> for State {
|
||||
#[instrument(name = "StartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StartContainer { deployment }: StartContainer,
|
||||
StartDeployment { deployment }: StartDeployment,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
@@ -265,23 +268,14 @@ impl Resolve<StartContainer, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.starting = true)?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name.clone(),
|
||||
name: deployment.name,
|
||||
})
|
||||
.await
|
||||
{
|
||||
@@ -301,23 +295,167 @@ impl Resolve<StartContainer, (User, Update)> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StopContainer, (User, Update)> for State {
|
||||
#[instrument(name = "StopContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
impl Resolve<RestartDeployment, (User, Update)> for State {
|
||||
#[instrument(name = "RestartDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StopContainer {
|
||||
RestartDeployment { deployment }: RestartDeployment,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.restarting = true)?;
|
||||
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RestartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"restart container",
|
||||
format_serror(
|
||||
&e.context("failed to restart container").into(),
|
||||
),
|
||||
),
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<PauseDeployment, (User, Update)> for State {
|
||||
#[instrument(name = "PauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PauseDeployment { deployment }: PauseDeployment,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.pausing = true)?;
|
||||
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"pause container",
|
||||
format_serror(&e.context("failed to pause container").into()),
|
||||
),
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UnpauseDeployment, (User, Update)> for State {
|
||||
#[instrument(name = "UnpauseDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UnpauseDeployment { deployment }: UnpauseDeployment,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.unpausing = true)?;
|
||||
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::UnpauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"unpause container",
|
||||
format_serror(
|
||||
&e.context("failed to unpause container").into(),
|
||||
),
|
||||
),
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StopDeployment, (User, Update)> for State {
|
||||
#[instrument(name = "StopDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StopDeployment {
|
||||
deployment,
|
||||
signal,
|
||||
time,
|
||||
}: StopContainer,
|
||||
}: StopDeployment,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
@@ -330,23 +468,14 @@ impl Resolve<StopContainer, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.stopping = true)?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StopContainer {
|
||||
name: deployment.name.clone(),
|
||||
name: deployment.name,
|
||||
signal: signal
|
||||
.unwrap_or(deployment.config.termination_signal)
|
||||
.into(),
|
||||
@@ -372,111 +501,19 @@ impl Resolve<StopContainer, (User, Update)> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StopAllContainers, (User, Update)> for State {
|
||||
#[instrument(name = "StopAllContainers", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
impl Resolve<DestroyDeployment, (User, Update)> for State {
|
||||
#[instrument(name = "DestroyDeployment", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StopAllContainers { server }: StopAllContainers,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let (server, status) = get_server_with_status(&server).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
|
||||
// get the action state for the server (or insert default).
|
||||
let action_state = action_states()
|
||||
.server
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure server not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard = action_state
|
||||
.update(|state| state.stopping_containers = true)?;
|
||||
|
||||
let deployments = find_collect(
|
||||
&db_client().await.deployments,
|
||||
doc! {
|
||||
"config.server_id": &server.id
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to find deployments on server")?;
|
||||
|
||||
let futures = deployments.iter().map(|deployment| async {
|
||||
let req = super::ExecuteRequest::StopContainer(StopContainer {
|
||||
deployment: deployment.id.clone(),
|
||||
signal: None,
|
||||
time: None,
|
||||
});
|
||||
(
|
||||
async {
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
State
|
||||
.resolve(
|
||||
StopContainer {
|
||||
deployment: deployment.id.clone(),
|
||||
signal: None,
|
||||
time: None,
|
||||
},
|
||||
(user.clone(), update),
|
||||
)
|
||||
.await
|
||||
}
|
||||
.await,
|
||||
deployment.name.clone(),
|
||||
deployment.id.clone(),
|
||||
)
|
||||
});
|
||||
let results = join_all(futures).await;
|
||||
let deployment_names = deployments
|
||||
.iter()
|
||||
.map(|d| format!("{} ({})", d.name, d.id))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
update.push_simple_log("stopping containers", deployment_names);
|
||||
for (res, name, id) in results {
|
||||
if let Err(e) = res {
|
||||
update.push_error_log(
|
||||
"stop container failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"failed to stop container {name} ({id})"
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RemoveContainer, (User, Update)> for State {
|
||||
#[instrument(name = "RemoveContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RemoveContainer {
|
||||
DestroyDeployment {
|
||||
deployment,
|
||||
signal,
|
||||
time,
|
||||
}: RemoveContainer,
|
||||
}: DestroyDeployment,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&deployment, &user).await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
@@ -487,25 +524,16 @@ impl Resolve<RemoveContainer, (User, Update)> for State {
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.removing = true)?;
|
||||
action_state.update(|state| state.destroying = true)?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name.clone(),
|
||||
name: deployment.name,
|
||||
signal: signal
|
||||
.unwrap_or(deployment.config.termination_signal)
|
||||
.into(),
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::time::Instant;
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use formatting::format_serror;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
update::{Log, Update},
|
||||
@@ -29,6 +29,7 @@ mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
|
||||
#[typeshare]
|
||||
@@ -38,16 +39,45 @@ mod sync;
|
||||
#[serde(tag = "type", content = "params")]
|
||||
pub enum ExecuteRequest {
|
||||
// ==== SERVER ====
|
||||
StartContainer(StartContainer),
|
||||
RestartContainer(RestartContainer),
|
||||
PauseContainer(PauseContainer),
|
||||
UnpauseContainer(UnpauseContainer),
|
||||
StopContainer(StopContainer),
|
||||
DestroyContainer(DestroyContainer),
|
||||
StartAllContainers(StartAllContainers),
|
||||
RestartAllContainers(RestartAllContainers),
|
||||
PauseAllContainers(PauseAllContainers),
|
||||
UnpauseAllContainers(UnpauseAllContainers),
|
||||
StopAllContainers(StopAllContainers),
|
||||
PruneContainers(PruneContainers),
|
||||
PruneImages(PruneImages),
|
||||
DeleteNetwork(DeleteNetwork),
|
||||
PruneNetworks(PruneNetworks),
|
||||
DeleteImage(DeleteImage),
|
||||
PruneImages(PruneImages),
|
||||
DeleteVolume(DeleteVolume),
|
||||
PruneVolumes(PruneVolumes),
|
||||
PruneDockerBuilders(PruneDockerBuilders),
|
||||
PruneBuildx(PruneBuildx),
|
||||
PruneSystem(PruneSystem),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
Deploy(Deploy),
|
||||
StartContainer(StartContainer),
|
||||
StopContainer(StopContainer),
|
||||
StopAllContainers(StopAllContainers),
|
||||
RemoveContainer(RemoveContainer),
|
||||
StartDeployment(StartDeployment),
|
||||
RestartDeployment(RestartDeployment),
|
||||
PauseDeployment(PauseDeployment),
|
||||
UnpauseDeployment(UnpauseDeployment),
|
||||
StopDeployment(StopDeployment),
|
||||
DestroyDeployment(DestroyDeployment),
|
||||
|
||||
// ==== STACK ====
|
||||
DeployStack(DeployStack),
|
||||
StartStack(StartStack),
|
||||
RestartStack(RestartStack),
|
||||
StopStack(StopStack),
|
||||
PauseStack(PauseStack),
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
|
||||
// ==== BUILD ====
|
||||
RunBuild(RunBuild),
|
||||
@@ -56,6 +86,8 @@ pub enum ExecuteRequest {
|
||||
// ==== REPO ====
|
||||
CloneRepo(CloneRepo),
|
||||
PullRepo(PullRepo),
|
||||
BuildRepo(BuildRepo),
|
||||
CancelRepoBuild(CancelRepoBuild),
|
||||
|
||||
// ==== PROCEDURE ====
|
||||
RunProcedure(RunProcedure),
|
||||
@@ -103,7 +135,7 @@ async fn handler(
|
||||
};
|
||||
let res = async {
|
||||
let mut update =
|
||||
find_one_by_id(&db_client().await.updates, &update_id)
|
||||
find_one_by_id(&db_client().updates, &update_id)
|
||||
.await
|
||||
.context("failed to query to db")?
|
||||
.context("no update exists with given id")?;
|
||||
@@ -129,10 +161,7 @@ async fn task(
|
||||
user: User,
|
||||
update: Update,
|
||||
) -> anyhow::Result<String> {
|
||||
info!(
|
||||
"/execute request {req_id} | user: {} ({})",
|
||||
user.username, user.id
|
||||
);
|
||||
info!("/execute request {req_id} | user: {}", user.username);
|
||||
let timer = Instant::now();
|
||||
|
||||
let res = State
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::pin::Pin;
|
||||
|
||||
use formatting::{bold, colored, format_serror, muted, Color};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::execute::RunProcedure,
|
||||
entities::{
|
||||
permission::PermissionLevel, procedure::Procedure,
|
||||
@@ -50,7 +50,7 @@ fn resolve_inner(
|
||||
// assumes first log is already created
|
||||
// and will panic otherwise.
|
||||
update.push_simple_log(
|
||||
"execute_procedure",
|
||||
"Execute procedure",
|
||||
format!(
|
||||
"{}: executing procedure '{}'",
|
||||
muted("INFO"),
|
||||
@@ -69,6 +69,8 @@ fn resolve_inner(
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.running = true)?;
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let update = Mutex::new(update);
|
||||
|
||||
let res = execute_procedure(&procedure, &update).await;
|
||||
@@ -78,9 +80,9 @@ fn resolve_inner(
|
||||
match res {
|
||||
Ok(_) => {
|
||||
update.push_simple_log(
|
||||
"execution ok",
|
||||
"Execution ok",
|
||||
format!(
|
||||
"{}: the procedure has {} with no errors",
|
||||
"{}: The procedure has {} with no errors",
|
||||
muted("INFO"),
|
||||
colored("completed", Color::Green)
|
||||
),
|
||||
@@ -98,7 +100,7 @@ fn resolve_inner(
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
|
||||
@@ -1,9 +1,13 @@
|
||||
use anyhow::anyhow;
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
monitor_timestamp, optional_string,
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
builder::{Builder, BuilderConfig},
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
@@ -13,18 +17,36 @@ use monitor_client::{
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, to_document},
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{periphery_client, update::update_update},
|
||||
alert::send_alerts,
|
||||
helpers::{
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
channel::repo_cancel_channel,
|
||||
git_token,
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_string,
|
||||
interpolate_variables_secrets_into_system_command,
|
||||
},
|
||||
periphery_client,
|
||||
query::get_variables_and_secrets,
|
||||
update::update_update,
|
||||
},
|
||||
resource::{self, refresh_repo_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
use super::ExecuteRequest;
|
||||
|
||||
impl Resolve<CloneRepo, (User, Update)> for State {
|
||||
#[instrument(name = "CloneRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
@@ -32,7 +54,7 @@ impl Resolve<CloneRepo, (User, Update)> for State {
|
||||
CloneRepo { repo }: CloneRepo,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
@@ -48,28 +70,44 @@ impl Resolve<CloneRepo, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.cloning = true)?;
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if repo.config.server_id.is_empty() {
|
||||
return Err(anyhow!("repo has no server attached"));
|
||||
}
|
||||
|
||||
let git_token = git_token(
|
||||
&repo.config.git_provider,
|
||||
&repo.config.git_account,
|
||||
|https| repo.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
|
||||
)?;
|
||||
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let github_token = core_config()
|
||||
.github_accounts
|
||||
.get(&repo.config.github_account)
|
||||
.cloned();
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers =
|
||||
interpolate(&mut repo, &mut update).await?;
|
||||
|
||||
let logs = match periphery
|
||||
.request(api::git::CloneRepo {
|
||||
args: (&repo).into(),
|
||||
github_token,
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(logs) => logs,
|
||||
Ok(res) => res.logs,
|
||||
Err(e) => {
|
||||
vec![Log::error(
|
||||
"clone repo",
|
||||
@@ -85,7 +123,7 @@ impl Resolve<CloneRepo, (User, Update)> for State {
|
||||
update_last_pulled_time(&repo.name).await;
|
||||
}
|
||||
|
||||
handle_update_return(update).await
|
||||
handle_server_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -96,7 +134,7 @@ impl Resolve<PullRepo, (User, Update)> for State {
|
||||
PullRepo { repo }: PullRepo,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
@@ -112,25 +150,47 @@ impl Resolve<PullRepo, (User, Update)> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.pulling = true)?;
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if repo.config.server_id.is_empty() {
|
||||
return Err(anyhow!("repo has no server attached"));
|
||||
}
|
||||
|
||||
let git_token = git_token(
|
||||
&repo.config.git_provider,
|
||||
&repo.config.git_account,
|
||||
|https| repo.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
|
||||
)?;
|
||||
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers =
|
||||
interpolate(&mut repo, &mut update).await?;
|
||||
|
||||
let logs = match periphery
|
||||
.request(api::git::PullRepo {
|
||||
name: repo.name.clone(),
|
||||
branch: optional_string(&repo.config.branch),
|
||||
commit: optional_string(&repo.config.commit),
|
||||
on_pull: repo.config.on_pull.into_option(),
|
||||
args: (&repo).into(),
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(logs) => logs,
|
||||
Ok(res) => {
|
||||
update.commit_hash = res.commit_hash.unwrap_or_default();
|
||||
res.logs
|
||||
}
|
||||
Err(e) => {
|
||||
vec![Log::error(
|
||||
"pull repo",
|
||||
@@ -147,12 +207,12 @@ impl Resolve<PullRepo, (User, Update)> for State {
|
||||
update_last_pulled_time(&repo.name).await;
|
||||
}
|
||||
|
||||
handle_update_return(update).await
|
||||
handle_server_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all, fields(update_id = update.id))]
|
||||
async fn handle_update_return(
|
||||
async fn handle_server_update_return(
|
||||
update: Update,
|
||||
) -> anyhow::Result<Update> {
|
||||
// Need to manually update the update before cache refresh,
|
||||
@@ -161,7 +221,7 @@ async fn handle_update_return(
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
@@ -176,11 +236,10 @@ async fn handle_update_return(
|
||||
#[instrument]
|
||||
async fn update_last_pulled_time(repo_name: &str) {
|
||||
let res = db_client()
|
||||
.await
|
||||
.repos
|
||||
.update_one(
|
||||
doc! { "name": repo_name },
|
||||
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
|
||||
doc! { "$set": { "info.last_pulled_at": komodo_timestamp() } },
|
||||
)
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
@@ -189,3 +248,414 @@ async fn update_last_pulled_time(repo_name: &str) {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<BuildRepo, (User, Update)> for State {
|
||||
#[instrument(name = "BuildRepo", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
BuildRepo { repo }: BuildRepo,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.builder_id.is_empty() {
|
||||
return Err(anyhow!("Must attach builder to BuildRepo"));
|
||||
}
|
||||
|
||||
// get the action state for the repo (or insert default).
|
||||
let action_state =
|
||||
action_states().repo.get_or_insert_default(&repo.id).await;
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure repo not already busy before updating.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.building = true)?;
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token = git_token(
|
||||
&repo.config.git_provider,
|
||||
&repo.config.git_account,
|
||||
|https| repo.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", repo.config.git_provider, repo.config.git_account),
|
||||
)?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
let mut cancel_recv =
|
||||
repo_cancel_channel().receiver.resubscribe();
|
||||
let repo_id = repo.id.clone();
|
||||
|
||||
let builder =
|
||||
resource::get::<Builder>(&repo.config.builder_id).await?;
|
||||
|
||||
let is_server_builder =
|
||||
matches!(&builder.config, BuilderConfig::Server(_));
|
||||
|
||||
tokio::spawn(async move {
|
||||
let poll = async {
|
||||
loop {
|
||||
let (incoming_repo_id, mut update) = tokio::select! {
|
||||
_ = cancel_clone.cancelled() => return Ok(()),
|
||||
id = cancel_recv.recv() => id?
|
||||
};
|
||||
if incoming_repo_id == repo_id {
|
||||
if is_server_builder {
|
||||
update.push_error_log("Cancel acknowledged", "Repo Build cancellation is not possible on server builders at this time. Use an AWS builder to enable this feature.");
|
||||
} else {
|
||||
update.push_simple_log("Cancel acknowledged", "The repo build cancellation has been queued, it may still take some time.");
|
||||
}
|
||||
update.finalize();
|
||||
let id = update.id.clone();
|
||||
if let Err(e) = update_update(update).await {
|
||||
warn!("failed to modify Update {id} on db | {e:#}");
|
||||
}
|
||||
if !is_server_builder {
|
||||
cancel_clone.cancel();
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
#[allow(unreachable_code)]
|
||||
anyhow::Ok(())
|
||||
};
|
||||
tokio::select! {
|
||||
_ = cancel_clone.cancelled() => {}
|
||||
_ = poll => {}
|
||||
}
|
||||
});
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
repo.name.clone(),
|
||||
None,
|
||||
builder,
|
||||
&mut update,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(builder) => builder,
|
||||
Err(e) => {
|
||||
warn!("failed to get builder for repo {} | {e:#}", repo.name);
|
||||
update.logs.push(Log::error(
|
||||
"get builder",
|
||||
format_serror(&e.context("failed to get builder").into()),
|
||||
));
|
||||
return handle_builder_early_return(
|
||||
update, repo.id, repo.name, false,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
};
|
||||
|
||||
// CLONE REPO
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers =
|
||||
interpolate(&mut repo, &mut update).await?;
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::git::CloneRepo {
|
||||
args: (&repo).into(),
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect()
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_builder_early_return(update, repo.id, repo.name, true).await
|
||||
},
|
||||
};
|
||||
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
update.logs.extend(res.logs);
|
||||
update.commit_hash = res.commit_hash.unwrap_or_default();
|
||||
res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
|
||||
let db = db_client();
|
||||
|
||||
if update.success {
|
||||
let _ = db
|
||||
.repos
|
||||
.update_one(
|
||||
doc! { "name": &repo.name },
|
||||
doc! { "$set": {
|
||||
"info.last_built_at": komodo_timestamp(),
|
||||
"info.built_hash": &update.commit_hash,
|
||||
"info.built_message": commit_message
|
||||
}},
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
// stop the cancel listening task from going forever
|
||||
cancel.cancel();
|
||||
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_repo_state_cache().await;
|
||||
}
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
target,
|
||||
ts: komodo_timestamp(),
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::RepoBuildFailed {
|
||||
id: repo.id,
|
||||
name: repo.name,
|
||||
},
|
||||
};
|
||||
send_alerts(&[alert]).await
|
||||
});
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(update))]
|
||||
async fn handle_builder_early_return(
|
||||
mut update: Update,
|
||||
repo_id: String,
|
||||
repo_name: String,
|
||||
is_cancel: bool,
|
||||
) -> anyhow::Result<Update> {
|
||||
update.finalize();
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_repo_state_cache().await;
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
target,
|
||||
ts: komodo_timestamp(),
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::RepoBuildFailed {
|
||||
id: repo_id,
|
||||
name: repo_name,
|
||||
},
|
||||
};
|
||||
send_alerts(&[alert]).await
|
||||
});
|
||||
}
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn validate_cancel_repo_build(
|
||||
request: &ExecuteRequest,
|
||||
) -> anyhow::Result<()> {
|
||||
if let ExecuteRequest::CancelRepoBuild(req) = request {
|
||||
let repo = resource::get::<Repo>(&req.repo).await?;
|
||||
|
||||
let db = db_client();
|
||||
|
||||
let (latest_build, latest_cancel) = tokio::try_join!(
|
||||
db.updates
|
||||
.find_one(doc! {
|
||||
"operation": "BuildRepo",
|
||||
"target.id": &repo.id,
|
||||
},)
|
||||
.with_options(
|
||||
FindOneOptions::builder()
|
||||
.sort(doc! { "start_ts": -1 })
|
||||
.build()
|
||||
)
|
||||
.into_future(),
|
||||
db.updates
|
||||
.find_one(doc! {
|
||||
"operation": "CancelRepoBuild",
|
||||
"target.id": &repo.id,
|
||||
},)
|
||||
.with_options(
|
||||
FindOneOptions::builder()
|
||||
.sort(doc! { "start_ts": -1 })
|
||||
.build()
|
||||
)
|
||||
.into_future()
|
||||
)?;
|
||||
|
||||
match (latest_build, latest_cancel) {
|
||||
(Some(build), Some(cancel)) => {
|
||||
if cancel.start_ts > build.start_ts {
|
||||
return Err(anyhow!(
|
||||
"Repo build has already been cancelled"
|
||||
));
|
||||
}
|
||||
}
|
||||
(None, _) => return Err(anyhow!("No repo build in progress")),
|
||||
_ => {}
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl Resolve<CancelRepoBuild, (User, Update)> for State {
|
||||
#[instrument(name = "CancelRepoBuild", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CancelRepoBuild { repo }: CancelRepoBuild,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// make sure the build is building
|
||||
if !action_states()
|
||||
.repo
|
||||
.get(&repo.id)
|
||||
.await
|
||||
.and_then(|s| s.get().ok().map(|s| s.building))
|
||||
.unwrap_or_default()
|
||||
{
|
||||
return Err(anyhow!("Repo is not building."));
|
||||
}
|
||||
|
||||
update.push_simple_log(
|
||||
"cancel triggered",
|
||||
"the repo build cancel has been triggered",
|
||||
);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
repo_cancel_channel()
|
||||
.sender
|
||||
.lock()
|
||||
.await
|
||||
.send((repo.id, update.clone()))?;
|
||||
|
||||
// Make sure cancel is set to complete after some time in case
|
||||
// no reciever is there to do it. Prevents update stuck in InProgress.
|
||||
let update_id = update.id.clone();
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_secs(60)).await;
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update_id,
|
||||
doc! { "$set": { "status": "Complete" } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!("failed to set CancelRepoBuild Update status Complete after timeout | {e:#}")
|
||||
}
|
||||
});
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
async fn interpolate(
|
||||
repo: &mut Repo,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<HashSet<(String, String)>> {
|
||||
if !repo.config.skip_secret_interp {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.on_clone,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.on_pull,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
Ok(secret_replacers)
|
||||
} else {
|
||||
Ok(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::{execute::LaunchServer, write::CreateServer},
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
@@ -34,7 +34,6 @@ impl Resolve<LaunchServer, (User, Update)> for State {
|
||||
) -> anyhow::Result<Update> {
|
||||
// validate name isn't already taken by another server
|
||||
if db_client()
|
||||
.await
|
||||
.servers
|
||||
.find_one(doc! {
|
||||
"name": &name
|
||||
@@ -62,6 +61,8 @@ impl Resolve<LaunchServer, (User, Update)> for State {
|
||||
let config = match template.config {
|
||||
ServerTemplateConfig::Aws(config) => {
|
||||
let region = config.region.clone();
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let instance = match launch_ec2_instance(&name, config).await
|
||||
{
|
||||
Ok(instance) => instance,
|
||||
@@ -82,14 +83,18 @@ impl Resolve<LaunchServer, (User, Update)> for State {
|
||||
instance.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("http://{}:8120", instance.ip).into(),
|
||||
address: format!("{protocol}://{}:{port}", instance.ip)
|
||||
.into(),
|
||||
region: region.into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
ServerTemplateConfig::Hetzner(config) => {
|
||||
let datacenter = config.datacenter;
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let server = match launch_hetzner_server(&name, config).await
|
||||
{
|
||||
Ok(server) => server,
|
||||
@@ -110,8 +115,10 @@ impl Resolve<LaunchServer, (User, Update)> for State {
|
||||
server.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("http://{}:8120", server.ip).into(),
|
||||
address: format!("{protocol}://{}:{port}", server.ip)
|
||||
.into(),
|
||||
region: datacenter.as_ref().to_string().into(),
|
||||
..Default::default()
|
||||
}
|
||||
|
||||
360
bin/core/src/api/execute/stack.rs
Normal file
360
bin/core/src/api/execute/stack.rs
Normal file
@@ -0,0 +1,360 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::Context;
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, stack::StackInfo, update::Update,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, to_document};
|
||||
use periphery_client::api::compose::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
},
|
||||
periphery_client,
|
||||
query::get_variables_and_secrets,
|
||||
update::update_update,
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
stack::{
|
||||
execute::execute_compose, get_stack_and_server,
|
||||
services::extract_services_into_res,
|
||||
},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<DeployStack, (User, Update)> for State {
|
||||
#[instrument(name = "DeployStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeployStack { stack, stop_time }: DeployStack,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// get the action state for the stack (or insert default).
|
||||
let action_state =
|
||||
action_states().stack.get_or_insert_default(&stack.id).await;
|
||||
|
||||
// Will check to ensure stack not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.deploying = true)?;
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token = crate::helpers::git_token(
|
||||
&stack.config.git_provider,
|
||||
&stack.config.git_account,
|
||||
|https| stack.config.git_https = https,
|
||||
).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {}", stack.config.git_provider, stack.config.git_account),
|
||||
)?;
|
||||
|
||||
let registry_token = crate::helpers::registry_token(
|
||||
&stack.config.registry_provider,
|
||||
&stack.config.registry_account,
|
||||
).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
|
||||
)?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers = if !stack.config.skip_secret_interp {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut stack.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut stack.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut stack.config.build_extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let ComposeUpResponse {
|
||||
logs,
|
||||
deployed,
|
||||
file_contents,
|
||||
missing_files,
|
||||
remote_errors,
|
||||
commit_hash,
|
||||
commit_message,
|
||||
} = periphery_client(&server)?
|
||||
.request(ComposeUp {
|
||||
stack: stack.clone(),
|
||||
service: None,
|
||||
git_token,
|
||||
registry_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await?;
|
||||
|
||||
update.logs.extend(logs);
|
||||
|
||||
let update_info = async {
|
||||
let latest_services = if !file_contents.is_empty() {
|
||||
let mut services = Vec::new();
|
||||
for contents in &file_contents {
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&stack.project_name(true),
|
||||
&contents.contents,
|
||||
&mut services,
|
||||
) {
|
||||
update.push_error_log(
|
||||
"extract services",
|
||||
format_serror(&e.context(format!("Failed to extract stack services for compose file path {}. Things probably won't work correctly", contents.path)).into())
|
||||
);
|
||||
}
|
||||
}
|
||||
services
|
||||
} else {
|
||||
// maybe better to do something else here for services.
|
||||
stack.info.latest_services.clone()
|
||||
};
|
||||
|
||||
// This ensures to get the latest project name,
|
||||
// as it may have changed since the last deploy.
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
let (
|
||||
deployed_services,
|
||||
deployed_contents,
|
||||
deployed_hash,
|
||||
deployed_message,
|
||||
) = if deployed {
|
||||
(
|
||||
Some(latest_services.clone()),
|
||||
Some(file_contents.clone()),
|
||||
commit_hash.clone(),
|
||||
commit_message.clone(),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
stack.info.deployed_services,
|
||||
stack.info.deployed_contents,
|
||||
stack.info.deployed_hash,
|
||||
stack.info.deployed_message,
|
||||
)
|
||||
};
|
||||
|
||||
let info = StackInfo {
|
||||
missing_files,
|
||||
deployed_project_name: project_name.into(),
|
||||
deployed_services,
|
||||
deployed_contents,
|
||||
deployed_hash,
|
||||
deployed_message,
|
||||
latest_services,
|
||||
remote_contents: stack
|
||||
.config
|
||||
.file_contents
|
||||
.is_empty()
|
||||
.then_some(file_contents),
|
||||
remote_errors: stack
|
||||
.config
|
||||
.file_contents
|
||||
.is_empty()
|
||||
.then_some(remote_errors),
|
||||
latest_hash: commit_hash,
|
||||
latest_message: commit_message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
.context("failed to serialize stack info to bson")?;
|
||||
|
||||
db_client()
|
||||
.stacks
|
||||
.update_one(
|
||||
doc! { "name": &stack.name },
|
||||
doc! { "$set": { "info": info } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update stack info on db")?;
|
||||
anyhow::Ok(())
|
||||
};
|
||||
|
||||
// This will be weird with single service deploys. Come back to it.
|
||||
if let Err(e) = update_info.await {
|
||||
update.push_error_log(
|
||||
"refresh stack info",
|
||||
format_serror(
|
||||
&e.context("failed to refresh stack info on db").into(),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// Ensure cached stack state up to date by updating server cache
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StartStack, (User, Update)> for State {
|
||||
#[instrument(name = "StartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StartStack { stack, service }: StartStack,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
execute_compose::<StartStack>(
|
||||
&stack,
|
||||
service,
|
||||
&user,
|
||||
|state| state.starting = true,
|
||||
update,
|
||||
(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RestartStack, (User, Update)> for State {
|
||||
#[instrument(name = "RestartStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RestartStack { stack, service }: RestartStack,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
execute_compose::<RestartStack>(
|
||||
&stack,
|
||||
service,
|
||||
&user,
|
||||
|state| {
|
||||
state.restarting = true;
|
||||
},
|
||||
update,
|
||||
(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<PauseStack, (User, Update)> for State {
|
||||
#[instrument(name = "PauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PauseStack { stack, service }: PauseStack,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
execute_compose::<PauseStack>(
|
||||
&stack,
|
||||
service,
|
||||
&user,
|
||||
|state| state.pausing = true,
|
||||
update,
|
||||
(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UnpauseStack, (User, Update)> for State {
|
||||
#[instrument(name = "UnpauseStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UnpauseStack { stack, service }: UnpauseStack,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
execute_compose::<UnpauseStack>(
|
||||
&stack,
|
||||
service,
|
||||
&user,
|
||||
|state| state.unpausing = true,
|
||||
update,
|
||||
(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StopStack, (User, Update)> for State {
|
||||
#[instrument(name = "StopStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StopStack {
|
||||
stack,
|
||||
stop_time,
|
||||
service,
|
||||
}: StopStack,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
execute_compose::<StopStack>(
|
||||
&stack,
|
||||
service,
|
||||
&user,
|
||||
|state| state.stopping = true,
|
||||
update,
|
||||
stop_time,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DestroyStack, (User, Update)> for State {
|
||||
#[instrument(name = "DestroyStack", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DestroyStack {
|
||||
stack,
|
||||
remove_orphans,
|
||||
stop_time,
|
||||
}: DestroyStack,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
execute_compose::<DestroyStack>(
|
||||
&stack,
|
||||
None,
|
||||
&user,
|
||||
|state| state.destroying = true,
|
||||
update,
|
||||
(stop_time, remove_orphans),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
@@ -1,39 +1,43 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::{colored, format_serror, Color};
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::{execute::RunSync, write::RefreshResourceSyncPending},
|
||||
entities::{
|
||||
self,
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
monitor_timestamp,
|
||||
deployment::Deployment,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Log, Update},
|
||||
user::{sync_user, User},
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
query::get_id_to_tags,
|
||||
sync::{
|
||||
deployment,
|
||||
resource::{
|
||||
get_updates_for_execution, AllResourcesById, ResourceSync,
|
||||
},
|
||||
},
|
||||
update::update_update,
|
||||
},
|
||||
helpers::{query::get_id_to_tags, update::update_update},
|
||||
resource::{self, refresh_resource_sync_state_cache},
|
||||
state::{db_client, State},
|
||||
sync::{
|
||||
deploy::{
|
||||
build_deploy_cache, deploy_from_cache, SyncDeployParams,
|
||||
},
|
||||
execute::{get_updates_for_execution, ExecuteResourceSync},
|
||||
remote::RemoteResources,
|
||||
AllResourcesById,
|
||||
},
|
||||
};
|
||||
|
||||
impl Resolve<RunSync, (User, Update)> for State {
|
||||
@@ -48,56 +52,100 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
>(&sync, &user, PermissionLevel::Execute)
|
||||
.await?;
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(anyhow!("resource sync repo not configured"));
|
||||
}
|
||||
// Send update here for FE to recheck action state
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let (res, logs, hash, message) =
|
||||
crate::helpers::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
let RemoteResources {
|
||||
resources,
|
||||
logs,
|
||||
hash,
|
||||
message,
|
||||
file_errors,
|
||||
..
|
||||
} = crate::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
|
||||
update.logs.extend(logs);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let resources = res?;
|
||||
if !file_errors.is_empty() {
|
||||
return Err(anyhow!("Found file errors. Cannot execute sync."))
|
||||
}
|
||||
|
||||
let resources = resources?;
|
||||
|
||||
let all_resources = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
let all_resources = AllResourcesById::load().await?;
|
||||
|
||||
let deployments_by_name = all_resources
|
||||
.deployments
|
||||
.values()
|
||||
.map(|deployment| (deployment.name.clone(), deployment.clone()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let stacks_by_name = all_resources
|
||||
.stacks
|
||||
.values()
|
||||
.map(|stack| (stack.name.clone(), stack.clone()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let deploy_cache = build_deploy_cache(SyncDeployParams {
|
||||
deployments: &resources.deployments,
|
||||
deployment_map: &deployments_by_name,
|
||||
stacks: &resources.stacks,
|
||||
stack_map: &stacks_by_name,
|
||||
all_resources: &all_resources,
|
||||
})
|
||||
.await?;
|
||||
|
||||
let delete = sync.config.managed || sync.config.delete;
|
||||
|
||||
let (servers_to_create, servers_to_update, servers_to_delete) =
|
||||
get_updates_for_execution::<Server>(
|
||||
resources.servers,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
) = deployment::get_updates_for_execution(
|
||||
) = get_updates_for_execution::<Deployment>(
|
||||
resources.deployments,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (stacks_to_create, stacks_to_update, stacks_to_delete) =
|
||||
get_updates_for_execution::<Stack>(
|
||||
resources.stacks,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (builds_to_create, builds_to_update, builds_to_delete) =
|
||||
get_updates_for_execution::<Build>(
|
||||
resources.builds,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (repos_to_create, repos_to_update, repos_to_delete) =
|
||||
get_updates_for_execution::<Repo>(
|
||||
resources.repos,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
@@ -106,25 +154,28 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
procedures_to_delete,
|
||||
) = get_updates_for_execution::<Procedure>(
|
||||
resources.procedures,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (builders_to_create, builders_to_update, builders_to_delete) =
|
||||
get_updates_for_execution::<Builder>(
|
||||
resources.builders,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (alerters_to_create, alerters_to_update, alerters_to_delete) =
|
||||
get_updates_for_execution::<Alerter>(
|
||||
resources.alerters,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
@@ -133,9 +184,10 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
server_templates_to_delete,
|
||||
) = get_updates_for_execution::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
@@ -144,32 +196,36 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
resource_syncs_to_delete,
|
||||
) = get_updates_for_execution::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
variables_to_create,
|
||||
variables_to_update,
|
||||
variables_to_delete,
|
||||
) = crate::helpers::sync::variables::get_updates_for_execution(
|
||||
) = crate::sync::variables::get_updates_for_execution(
|
||||
resources.variables,
|
||||
sync.config.delete,
|
||||
// Delete doesn't work with variables when match tags are set
|
||||
sync.config.match_tags.is_empty() && delete,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
user_groups_to_create,
|
||||
user_groups_to_update,
|
||||
user_groups_to_delete,
|
||||
) = crate::helpers::sync::user_groups::get_updates_for_execution(
|
||||
) = crate::sync::user_groups::get_updates_for_execution(
|
||||
resources.user_groups,
|
||||
sync.config.delete,
|
||||
// Delete doesn't work with user groups when match tags are set
|
||||
sync.config.match_tags.is_empty() && delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if resource_syncs_to_create.is_empty()
|
||||
if deploy_cache.is_empty()
|
||||
&& resource_syncs_to_create.is_empty()
|
||||
&& resource_syncs_to_update.is_empty()
|
||||
&& resource_syncs_to_delete.is_empty()
|
||||
&& server_templates_to_create.is_empty()
|
||||
@@ -181,6 +237,9 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
&& deployments_to_create.is_empty()
|
||||
&& deployments_to_update.is_empty()
|
||||
&& deployments_to_delete.is_empty()
|
||||
&& stacks_to_create.is_empty()
|
||||
&& stacks_to_update.is_empty()
|
||||
&& stacks_to_delete.is_empty()
|
||||
&& builds_to_create.is_empty()
|
||||
&& builds_to_update.is_empty()
|
||||
&& builds_to_delete.is_empty()
|
||||
@@ -220,7 +279,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
// No deps
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
crate::helpers::sync::variables::run_updates(
|
||||
crate::sync::variables::run_updates(
|
||||
variables_to_create,
|
||||
variables_to_update,
|
||||
variables_to_delete,
|
||||
@@ -229,7 +288,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
crate::helpers::sync::user_groups::run_updates(
|
||||
crate::sync::user_groups::run_updates(
|
||||
user_groups_to_create,
|
||||
user_groups_to_update,
|
||||
user_groups_to_delete,
|
||||
@@ -238,7 +297,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
entities::sync::ResourceSync::run_updates(
|
||||
ResourceSync::execute_sync_updates(
|
||||
resource_syncs_to_create,
|
||||
resource_syncs_to_update,
|
||||
resource_syncs_to_delete,
|
||||
@@ -247,7 +306,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
ServerTemplate::run_updates(
|
||||
ServerTemplate::execute_sync_updates(
|
||||
server_templates_to_create,
|
||||
server_templates_to_update,
|
||||
server_templates_to_delete,
|
||||
@@ -256,7 +315,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Server::run_updates(
|
||||
Server::execute_sync_updates(
|
||||
servers_to_create,
|
||||
servers_to_update,
|
||||
servers_to_delete,
|
||||
@@ -265,7 +324,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Alerter::run_updates(
|
||||
Alerter::execute_sync_updates(
|
||||
alerters_to_create,
|
||||
alerters_to_update,
|
||||
alerters_to_delete,
|
||||
@@ -276,7 +335,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
// Dependent on server
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Builder::run_updates(
|
||||
Builder::execute_sync_updates(
|
||||
builders_to_create,
|
||||
builders_to_update,
|
||||
builders_to_delete,
|
||||
@@ -285,7 +344,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Repo::run_updates(
|
||||
Repo::execute_sync_updates(
|
||||
repos_to_create,
|
||||
repos_to_update,
|
||||
repos_to_delete,
|
||||
@@ -296,7 +355,7 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
// Dependant on builder
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Build::run_updates(
|
||||
Build::execute_sync_updates(
|
||||
builds_to_create,
|
||||
builds_to_update,
|
||||
builds_to_delete,
|
||||
@@ -305,20 +364,30 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
);
|
||||
|
||||
// Dependant on server / build
|
||||
if let Some(res) = deployment::run_updates(
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
)
|
||||
.await
|
||||
{
|
||||
update.logs.extend(res);
|
||||
}
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Deployment::execute_sync_updates(
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
// stack only depends on server, but maybe will depend on build later.
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Stack::execute_sync_updates(
|
||||
stacks_to_create,
|
||||
stacks_to_update,
|
||||
stacks_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on everything
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Procedure::run_updates(
|
||||
Procedure::execute_sync_updates(
|
||||
procedures_to_create,
|
||||
procedures_to_update,
|
||||
procedures_to_delete,
|
||||
@@ -326,14 +395,17 @@ impl Resolve<RunSync, (User, Update)> for State {
|
||||
.await,
|
||||
);
|
||||
|
||||
let db = db_client().await;
|
||||
// Execute the deploy cache
|
||||
deploy_from_cache(deploy_cache, &mut update.logs).await;
|
||||
|
||||
let db = db_client();
|
||||
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db.resource_syncs,
|
||||
&sync.id,
|
||||
doc! {
|
||||
"$set": {
|
||||
"info.last_sync_ts": monitor_timestamp(),
|
||||
"info.last_sync_ts": komodo_timestamp(),
|
||||
"info.last_sync_hash": hash,
|
||||
"info.last_sync_message": message,
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
},
|
||||
entities::{update::ResourceTargetVariant, user::User},
|
||||
entities::{deployment::Deployment, server::Server, user::User},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
@@ -14,7 +14,7 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_user,
|
||||
resource::get_resource_ids_for_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
@@ -28,16 +28,10 @@ impl Resolve<ListAlerts, User> for State {
|
||||
) -> anyhow::Result<ListAlertsResponse> {
|
||||
let mut query = query.unwrap_or_default();
|
||||
if !user.admin && !core_config().transparent_mode {
|
||||
let server_ids = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Server,
|
||||
)
|
||||
.await?;
|
||||
let deployment_ids = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Deployment,
|
||||
)
|
||||
.await?;
|
||||
let server_ids =
|
||||
get_resource_ids_for_user::<Server>(&user).await?;
|
||||
let deployment_ids =
|
||||
get_resource_ids_for_user::<Deployment>(&user).await?;
|
||||
query.extend(doc! {
|
||||
"$or": [
|
||||
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
|
||||
@@ -47,7 +41,7 @@ impl Resolve<ListAlerts, User> for State {
|
||||
}
|
||||
|
||||
let alerts = find_collect(
|
||||
&db_client().await.alerts,
|
||||
&db_client().alerts,
|
||||
query,
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "ts": -1 })
|
||||
@@ -76,7 +70,7 @@ impl Resolve<GetAlert, User> for State {
|
||||
GetAlert { id }: GetAlert,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetAlertResponse> {
|
||||
find_one_by_id(&db_client().await.alerts, &id)
|
||||
find_one_by_id(&db_client().alerts, &id)
|
||||
.await
|
||||
.context("failed to query db for alert")?
|
||||
.context("no alert found with given id")
|
||||
|
||||
@@ -1,19 +1,17 @@
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
alerter::{Alerter, AlerterListItem},
|
||||
permission::PermissionLevel,
|
||||
update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_user,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
@@ -59,19 +57,16 @@ impl Resolve<GetAlertersSummary, User> for State {
|
||||
GetAlertersSummary {}: GetAlertersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAlertersSummaryResponse> {
|
||||
let query = match get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Alerter,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let query =
|
||||
match resource::get_resource_ids_for_user::<Alerter>(&user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.alerters
|
||||
.count_documents(query)
|
||||
.await
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::OnceLock,
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use futures::TryStreamExt;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
build::{Build, BuildActionState, BuildListItem, BuildState},
|
||||
@@ -21,7 +18,7 @@ use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::{Resolve, ResolveToString};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
@@ -148,7 +145,6 @@ impl Resolve<GetBuildMonthlyStats, User> for State {
|
||||
let open_ts = close_ts - 30 * ONE_DAY_MS;
|
||||
|
||||
let mut build_updates = db_client()
|
||||
.await
|
||||
.updates
|
||||
.find(doc! {
|
||||
"start_ts": {
|
||||
@@ -193,16 +189,16 @@ fn ms_to_hour(duration: i64) -> f64 {
|
||||
duration as f64 / MS_TO_HOUR_DIVISOR
|
||||
}
|
||||
|
||||
impl Resolve<GetBuildVersions, User> for State {
|
||||
impl Resolve<ListBuildVersions, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuildVersions {
|
||||
ListBuildVersions {
|
||||
build,
|
||||
major,
|
||||
minor,
|
||||
patch,
|
||||
limit,
|
||||
}: GetBuildVersions,
|
||||
}: ListBuildVersions,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
@@ -232,7 +228,7 @@ impl Resolve<GetBuildVersions, User> for State {
|
||||
}
|
||||
|
||||
let versions = find_collect(
|
||||
&db_client().await.updates,
|
||||
&db_client().updates,
|
||||
filter,
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "_id": -1 })
|
||||
@@ -250,42 +246,6 @@ impl Resolve<GetBuildVersions, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
fn github_organizations() -> &'static String {
|
||||
static GITHUB_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
|
||||
GITHUB_ORGANIZATIONS.get_or_init(|| {
|
||||
serde_json::to_string(&core_config().github_organizations)
|
||||
.expect("failed to serialize github organizations")
|
||||
})
|
||||
}
|
||||
|
||||
impl ResolveToString<ListGithubOrganizations, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListGithubOrganizations {}: ListGithubOrganizations,
|
||||
_: User,
|
||||
) -> anyhow::Result<String> {
|
||||
Ok(github_organizations().clone())
|
||||
}
|
||||
}
|
||||
|
||||
fn docker_organizations() -> &'static String {
|
||||
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
|
||||
DOCKER_ORGANIZATIONS.get_or_init(|| {
|
||||
serde_json::to_string(&core_config().docker_organizations)
|
||||
.expect("failed to serialize docker organizations")
|
||||
})
|
||||
}
|
||||
|
||||
impl ResolveToString<ListDockerOrganizations, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListDockerOrganizations {}: ListDockerOrganizations,
|
||||
_: User,
|
||||
) -> anyhow::Result<String> {
|
||||
Ok(docker_organizations().clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListCommonBuildExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -331,7 +291,9 @@ impl Resolve<GetBuildWebhookEnabled, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
if build.config.git_provider != "github.com"
|
||||
|| build.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
@@ -361,11 +323,15 @@ impl Resolve<GetBuildWebhookEnabled, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
|
||||
@@ -1,22 +1,17 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::{
|
||||
api::read::{self, *},
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
builder::{Builder, BuilderConfig, BuilderListItem},
|
||||
builder::{Builder, BuilderListItem},
|
||||
permission::PermissionLevel,
|
||||
update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_user,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
@@ -62,19 +57,16 @@ impl Resolve<GetBuildersSummary, User> for State {
|
||||
GetBuildersSummary {}: GetBuildersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetBuildersSummaryResponse> {
|
||||
let query = match get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Builder,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let query =
|
||||
match resource::get_resource_ids_for_user::<Builder>(&user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.builders
|
||||
.count_documents(query)
|
||||
.await
|
||||
@@ -85,52 +77,3 @@ impl Resolve<GetBuildersSummary, User> for State {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetBuilderAvailableAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuilderAvailableAccounts { builder }: GetBuilderAvailableAccounts,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetBuilderAvailableAccountsResponse> {
|
||||
let builder = resource::get_check_permissions::<Builder>(
|
||||
&builder,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let (github, docker) = match builder.config {
|
||||
BuilderConfig::Aws(config) => {
|
||||
(config.github_accounts, config.docker_accounts)
|
||||
}
|
||||
BuilderConfig::Server(config) => {
|
||||
let res = self
|
||||
.resolve(
|
||||
read::GetAvailableAccounts {
|
||||
server: Some(config.server_id),
|
||||
},
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
(res.github, res.docker)
|
||||
}
|
||||
};
|
||||
|
||||
let mut github_set = HashSet::<String>::new();
|
||||
|
||||
github_set.extend(core_config().github_accounts.keys().cloned());
|
||||
github_set.extend(github);
|
||||
|
||||
let mut github = github_set.into_iter().collect::<Vec<_>>();
|
||||
github.sort();
|
||||
|
||||
let mut docker_set = HashSet::<String>::new();
|
||||
|
||||
docker_set.extend(core_config().docker_accounts.keys().cloned());
|
||||
docker_set.extend(docker);
|
||||
|
||||
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
|
||||
docker.sort();
|
||||
|
||||
Ok(GetBuilderAvailableAccountsResponse { github, docker })
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
use std::{cmp, collections::HashSet};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
deployment::{
|
||||
Deployment, DeploymentActionState, DeploymentConfig,
|
||||
DeploymentListItem, DeploymentState, DockerContainerStats,
|
||||
DeploymentListItem, DeploymentState,
|
||||
},
|
||||
docker::container::ContainerStats,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::Log,
|
||||
@@ -84,10 +85,10 @@ impl Resolve<GetDeploymentContainer, User> for State {
|
||||
|
||||
const MAX_LOG_LENGTH: u64 = 5000;
|
||||
|
||||
impl Resolve<GetLog, User> for State {
|
||||
impl Resolve<GetDeploymentLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetLog { deployment, tail }: GetLog,
|
||||
GetDeploymentLog { deployment, tail }: GetDeploymentLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<Log> {
|
||||
let Deployment {
|
||||
@@ -114,15 +115,15 @@ impl Resolve<GetLog, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<SearchLog, User> for State {
|
||||
impl Resolve<SearchDeploymentLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
SearchLog {
|
||||
SearchDeploymentLog {
|
||||
deployment,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
}: SearchLog,
|
||||
}: SearchDeploymentLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<Log> {
|
||||
let Deployment {
|
||||
@@ -156,7 +157,7 @@ impl Resolve<GetDeploymentStats, User> for State {
|
||||
&self,
|
||||
GetDeploymentStats { deployment }: GetDeploymentStats,
|
||||
user: User,
|
||||
) -> anyhow::Result<DockerContainerStats> {
|
||||
) -> anyhow::Result<ContainerStats> {
|
||||
let Deployment {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
@@ -222,14 +223,17 @@ impl Resolve<GetDeploymentsSummary, User> for State {
|
||||
DeploymentState::Running => {
|
||||
res.running += 1;
|
||||
}
|
||||
DeploymentState::Unknown => {
|
||||
res.unknown += 1;
|
||||
DeploymentState::Exited | DeploymentState::Paused => {
|
||||
res.stopped += 1;
|
||||
}
|
||||
DeploymentState::NotDeployed => {
|
||||
res.not_deployed += 1;
|
||||
}
|
||||
DeploymentState::Unknown => {
|
||||
res.unknown += 1;
|
||||
}
|
||||
_ => {
|
||||
res.stopped += 1;
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,33 @@
|
||||
use std::{sync::OnceLock, time::Instant};
|
||||
use std::{collections::HashSet, sync::OnceLock, time::Instant};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::read::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, ResolveToString, Resolver};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
build::Build,
|
||||
builder::{Builder, BuilderConfig},
|
||||
config::{DockerRegistry, GitProvider},
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
sync::ResourceSync,
|
||||
user::User,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use resolver_api::{
|
||||
derive::Resolver, Resolve, ResolveToString, Resolver,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{auth::auth_request, config::core_config, state::State};
|
||||
use crate::{
|
||||
auth::auth_request, config::core_config, helpers::periphery_client,
|
||||
resource, state::State,
|
||||
};
|
||||
|
||||
mod alert;
|
||||
mod alerter;
|
||||
@@ -19,10 +36,12 @@ mod builder;
|
||||
mod deployment;
|
||||
mod permission;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod search;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod toml;
|
||||
@@ -41,8 +60,9 @@ enum ReadRequest {
|
||||
GetVersion(GetVersion),
|
||||
#[to_string_resolver]
|
||||
GetCoreInfo(GetCoreInfo),
|
||||
#[to_string_resolver]
|
||||
GetAvailableAwsEcrLabels(GetAvailableAwsEcrLabels),
|
||||
ListSecrets(ListSecrets),
|
||||
ListGitProvidersFromConfig(ListGitProvidersFromConfig),
|
||||
ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig),
|
||||
|
||||
// ==== USER ====
|
||||
GetUsername(GetUsername),
|
||||
@@ -79,15 +99,28 @@ enum ReadRequest {
|
||||
GetServer(GetServer),
|
||||
GetServerState(GetServerState),
|
||||
GetPeripheryVersion(GetPeripheryVersion),
|
||||
GetDockerContainers(GetDockerContainers),
|
||||
GetDockerImages(GetDockerImages),
|
||||
GetDockerNetworks(GetDockerNetworks),
|
||||
GetServerActionState(GetServerActionState),
|
||||
GetHistoricalServerStats(GetHistoricalServerStats),
|
||||
GetAvailableAccounts(GetAvailableAccounts),
|
||||
GetAvailableSecrets(GetAvailableSecrets),
|
||||
ListServers(ListServers),
|
||||
ListFullServers(ListFullServers),
|
||||
InspectDockerContainer(InspectDockerContainer),
|
||||
GetResourceMatchingContainer(GetResourceMatchingContainer),
|
||||
GetContainerLog(GetContainerLog),
|
||||
SearchContainerLog(SearchContainerLog),
|
||||
InspectDockerNetwork(InspectDockerNetwork),
|
||||
InspectDockerImage(InspectDockerImage),
|
||||
ListDockerImageHistory(ListDockerImageHistory),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
#[to_string_resolver]
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
#[to_string_resolver]
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
#[to_string_resolver]
|
||||
ListDockerImages(ListDockerImages),
|
||||
#[to_string_resolver]
|
||||
ListDockerVolumes(ListDockerVolumes),
|
||||
#[to_string_resolver]
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
GetDeploymentsSummary(GetDeploymentsSummary),
|
||||
@@ -95,8 +128,8 @@ enum ReadRequest {
|
||||
GetDeploymentContainer(GetDeploymentContainer),
|
||||
GetDeploymentActionState(GetDeploymentActionState),
|
||||
GetDeploymentStats(GetDeploymentStats),
|
||||
GetLog(GetLog),
|
||||
SearchLog(SearchLog),
|
||||
GetDeploymentLog(GetDeploymentLog),
|
||||
SearchDeploymentLog(SearchDeploymentLog),
|
||||
ListDeployments(ListDeployments),
|
||||
ListFullDeployments(ListFullDeployments),
|
||||
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
|
||||
@@ -106,15 +139,11 @@ enum ReadRequest {
|
||||
GetBuild(GetBuild),
|
||||
GetBuildActionState(GetBuildActionState),
|
||||
GetBuildMonthlyStats(GetBuildMonthlyStats),
|
||||
GetBuildVersions(GetBuildVersions),
|
||||
ListBuildVersions(ListBuildVersions),
|
||||
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
|
||||
ListBuilds(ListBuilds),
|
||||
ListFullBuilds(ListFullBuilds),
|
||||
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
|
||||
#[to_string_resolver]
|
||||
ListGithubOrganizations(ListGithubOrganizations),
|
||||
#[to_string_resolver]
|
||||
ListDockerOrganizations(ListDockerOrganizations),
|
||||
|
||||
// ==== REPO ====
|
||||
GetReposSummary(GetReposSummary),
|
||||
@@ -132,10 +161,22 @@ enum ReadRequest {
|
||||
ListResourceSyncs(ListResourceSyncs),
|
||||
ListFullResourceSyncs(ListFullResourceSyncs),
|
||||
|
||||
// ==== STACK ====
|
||||
GetStacksSummary(GetStacksSummary),
|
||||
GetStack(GetStack),
|
||||
GetStackActionState(GetStackActionState),
|
||||
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
|
||||
GetStackServiceLog(GetStackServiceLog),
|
||||
SearchStackServiceLog(SearchStackServiceLog),
|
||||
ListStacks(ListStacks),
|
||||
ListFullStacks(ListFullStacks),
|
||||
ListStackServices(ListStackServices),
|
||||
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
|
||||
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
|
||||
|
||||
// ==== BUILDER ====
|
||||
GetBuildersSummary(GetBuildersSummary),
|
||||
GetBuilder(GetBuilder),
|
||||
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
|
||||
ListBuilders(ListBuilders),
|
||||
ListFullBuilders(ListFullBuilders),
|
||||
|
||||
@@ -167,11 +208,17 @@ enum ReadRequest {
|
||||
#[to_string_resolver]
|
||||
GetSystemStats(GetSystemStats),
|
||||
#[to_string_resolver]
|
||||
GetSystemProcesses(GetSystemProcesses),
|
||||
ListSystemProcesses(ListSystemProcesses),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
GetVariable(GetVariable),
|
||||
ListVariables(ListVariables),
|
||||
|
||||
// ==== PROVIDER ====
|
||||
GetGitProviderAccount(GetGitProviderAccount),
|
||||
ListGitProviderAccounts(ListGitProviderAccounts),
|
||||
GetDockerRegistryAccount(GetDockerRegistryAccount),
|
||||
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -234,12 +281,15 @@ fn core_info() -> &'static String {
|
||||
let info = GetCoreInfoResponse {
|
||||
title: config.title.clone(),
|
||||
monitoring_interval: config.monitoring_interval,
|
||||
github_webhook_base_url: config
|
||||
.github_webhook_base_url
|
||||
.clone()
|
||||
.unwrap_or_else(|| config.host.clone()),
|
||||
webhook_base_url: if config.webhook_base_url.is_empty() {
|
||||
config.host.clone()
|
||||
} else {
|
||||
config.webhook_base_url.clone()
|
||||
},
|
||||
transparent_mode: config.transparent_mode,
|
||||
ui_write_disabled: config.ui_write_disabled,
|
||||
disable_confirm_dialog: config.disable_confirm_dialog,
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
github_webhook_owners: config
|
||||
.github_webhook_app
|
||||
.installations
|
||||
@@ -263,27 +313,262 @@ impl ResolveToString<GetCoreInfo, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
fn ecr_labels() -> &'static String {
|
||||
static ECR_LABELS: OnceLock<String> = OnceLock::new();
|
||||
ECR_LABELS.get_or_init(|| {
|
||||
serde_json::to_string(
|
||||
&core_config()
|
||||
.aws_ecr_registries
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
.context("failed to serialize ecr registries")
|
||||
.unwrap()
|
||||
})
|
||||
}
|
||||
|
||||
impl ResolveToString<GetAvailableAwsEcrLabels, User> for State {
|
||||
async fn resolve_to_string(
|
||||
impl Resolve<ListSecrets, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAvailableAwsEcrLabels {}: GetAvailableAwsEcrLabels,
|
||||
ListSecrets { target }: ListSecrets,
|
||||
_: User,
|
||||
) -> anyhow::Result<String> {
|
||||
Ok(ecr_labels().to_string())
|
||||
) -> anyhow::Result<ListSecretsResponse> {
|
||||
let mut secrets = core_config()
|
||||
.secrets
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<HashSet<_>>();
|
||||
|
||||
if let Some(target) = target {
|
||||
let server_id = match target {
|
||||
ResourceTarget::Server(id) => Some(id),
|
||||
ResourceTarget::Builder(id) => {
|
||||
match resource::get::<Builder>(&id).await?.config {
|
||||
BuilderConfig::Server(config) => Some(config.server_id),
|
||||
BuilderConfig::Aws(config) => {
|
||||
secrets.extend(config.secrets);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!("target must be `Server` or `Builder`"))
|
||||
}
|
||||
};
|
||||
if let Some(id) = server_id {
|
||||
let server = resource::get::<Server>(&id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
.request(periphery_client::api::ListSecrets {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to get secrets from server {}",
|
||||
server.name
|
||||
)
|
||||
})?;
|
||||
secrets.extend(more);
|
||||
}
|
||||
}
|
||||
|
||||
let mut secrets = secrets.into_iter().collect::<Vec<_>>();
|
||||
secrets.sort();
|
||||
|
||||
Ok(secrets)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListGitProvidersFromConfig, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListGitProvidersFromConfig { target }: ListGitProvidersFromConfig,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListGitProvidersFromConfigResponse> {
|
||||
let mut providers = core_config().git_providers.clone();
|
||||
|
||||
if let Some(target) = target {
|
||||
match target {
|
||||
ResourceTarget::Server(id) => {
|
||||
merge_git_providers_for_server(&mut providers, &id).await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
match resource::get::<Builder>(&id).await?.config {
|
||||
BuilderConfig::Server(config) => {
|
||||
merge_git_providers_for_server(
|
||||
&mut providers,
|
||||
&config.server_id,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
BuilderConfig::Aws(config) => {
|
||||
merge_git_providers(
|
||||
&mut providers,
|
||||
config.git_providers,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!("target must be `Server` or `Builder`"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let (builds, repos, syncs) = tokio::try_join!(
|
||||
resource::list_full_for_user::<Build>(
|
||||
Default::default(),
|
||||
&user
|
||||
),
|
||||
resource::list_full_for_user::<Repo>(Default::default(), &user),
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
Default::default(),
|
||||
&user
|
||||
),
|
||||
)?;
|
||||
|
||||
for build in builds {
|
||||
if !providers
|
||||
.iter()
|
||||
.any(|provider| provider.domain == build.config.git_provider)
|
||||
{
|
||||
providers.push(GitProvider {
|
||||
domain: build.config.git_provider,
|
||||
https: build.config.git_https,
|
||||
accounts: Default::default(),
|
||||
});
|
||||
}
|
||||
}
|
||||
for repo in repos {
|
||||
if !providers
|
||||
.iter()
|
||||
.any(|provider| provider.domain == repo.config.git_provider)
|
||||
{
|
||||
providers.push(GitProvider {
|
||||
domain: repo.config.git_provider,
|
||||
https: repo.config.git_https,
|
||||
accounts: Default::default(),
|
||||
});
|
||||
}
|
||||
}
|
||||
for sync in syncs {
|
||||
if !providers
|
||||
.iter()
|
||||
.any(|provider| provider.domain == sync.config.git_provider)
|
||||
{
|
||||
providers.push(GitProvider {
|
||||
domain: sync.config.git_provider,
|
||||
https: sync.config.git_https,
|
||||
accounts: Default::default(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
providers.sort();
|
||||
|
||||
Ok(providers)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListDockerRegistriesFromConfig, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListDockerRegistriesFromConfig { target }: ListDockerRegistriesFromConfig,
|
||||
_: User,
|
||||
) -> anyhow::Result<ListDockerRegistriesFromConfigResponse> {
|
||||
let mut registries = core_config().docker_registries.clone();
|
||||
|
||||
if let Some(target) = target {
|
||||
match target {
|
||||
ResourceTarget::Server(id) => {
|
||||
merge_docker_registries_for_server(&mut registries, &id)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
match resource::get::<Builder>(&id).await?.config {
|
||||
BuilderConfig::Server(config) => {
|
||||
merge_docker_registries_for_server(
|
||||
&mut registries,
|
||||
&config.server_id,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
BuilderConfig::Aws(config) => {
|
||||
merge_docker_registries(
|
||||
&mut registries,
|
||||
config.docker_registries,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow!("target must be `Server` or `Builder`"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
registries.sort();
|
||||
|
||||
Ok(registries)
|
||||
}
|
||||
}
|
||||
|
||||
async fn merge_git_providers_for_server(
|
||||
providers: &mut Vec<GitProvider>,
|
||||
server_id: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
.request(periphery_client::api::ListGitProviders {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to get git providers from server {}",
|
||||
server.name
|
||||
)
|
||||
})?;
|
||||
merge_git_providers(providers, more);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn merge_git_providers(
|
||||
providers: &mut Vec<GitProvider>,
|
||||
more: Vec<GitProvider>,
|
||||
) {
|
||||
for incoming_provider in more {
|
||||
if let Some(provider) = providers
|
||||
.iter_mut()
|
||||
.find(|provider| provider.domain == incoming_provider.domain)
|
||||
{
|
||||
for account in incoming_provider.accounts {
|
||||
if !provider.accounts.contains(&account) {
|
||||
provider.accounts.push(account);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
providers.push(incoming_provider);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn merge_docker_registries_for_server(
|
||||
registries: &mut Vec<DockerRegistry>,
|
||||
server_id: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
.request(periphery_client::api::ListDockerRegistries {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to get docker registries from server {}",
|
||||
server.name
|
||||
)
|
||||
})?;
|
||||
merge_docker_registries(registries, more);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn merge_docker_registries(
|
||||
registries: &mut Vec<DockerRegistry>,
|
||||
more: Vec<DockerRegistry>,
|
||||
) {
|
||||
for incoming_registry in more {
|
||||
if let Some(registry) = registries
|
||||
.iter_mut()
|
||||
.find(|registry| registry.domain == incoming_registry.domain)
|
||||
{
|
||||
for account in incoming_registry.accounts {
|
||||
if !registry.accounts.contains(&account) {
|
||||
registry.accounts.push(account);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
registries.push(incoming_registry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
|
||||
ListPermissionsResponse, ListUserTargetPermissions,
|
||||
@@ -11,7 +11,7 @@ use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user_permission_on_resource,
|
||||
helpers::query::get_user_permission_on_target,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
@@ -22,7 +22,7 @@ impl Resolve<ListPermissions, User> for State {
|
||||
user: User,
|
||||
) -> anyhow::Result<ListPermissionsResponse> {
|
||||
find_collect(
|
||||
&db_client().await.permissions,
|
||||
&db_client().permissions,
|
||||
doc! {
|
||||
"user_target.type": "User",
|
||||
"user_target.id": &user.id
|
||||
@@ -43,8 +43,7 @@ impl Resolve<GetPermissionLevel, User> for State {
|
||||
if user.admin {
|
||||
return Ok(PermissionLevel::Write);
|
||||
}
|
||||
let (variant, id) = target.extract_variant_id();
|
||||
get_user_permission_on_resource(&user, variant, id).await
|
||||
get_user_permission_on_target(&user, &target).await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +58,7 @@ impl Resolve<ListUserTargetPermissions, User> for State {
|
||||
}
|
||||
let (variant, id) = user_target.extract_variant_id();
|
||||
find_collect(
|
||||
&db_client().await.permissions,
|
||||
&db_client().permissions,
|
||||
doc! {
|
||||
"user_target.type": variant.as_ref(),
|
||||
"user_target.id": id
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
|
||||
116
bin/core/src/api/read/provider.rs
Normal file
116
bin/core/src/api/read/provider.rs
Normal file
@@ -0,0 +1,116 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetDockerRegistryAccount, GetDockerRegistryAccountResponse,
|
||||
GetGitProviderAccount, GetGitProviderAccountResponse,
|
||||
ListDockerRegistryAccounts, ListDockerRegistryAccountsResponse,
|
||||
ListGitProviderAccounts, ListGitProviderAccountsResponse,
|
||||
},
|
||||
entities::user::User,
|
||||
};
|
||||
use mongo_indexed::{doc, Document};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id, find::find_collect,
|
||||
mongodb::options::FindOptions,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
impl Resolve<GetGitProviderAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetGitProviderAccount { id }: GetGitProviderAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Only admins can read git provider accounts"
|
||||
));
|
||||
}
|
||||
find_one_by_id(&db_client().git_accounts, &id)
|
||||
.await
|
||||
.context("failed to query db for git provider accounts")?
|
||||
.context("did not find git provider account with the given id")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListGitProviderAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListGitProviderAccounts { domain, username }: ListGitProviderAccounts,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListGitProviderAccountsResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Only admins can read git provider accounts"
|
||||
));
|
||||
}
|
||||
let mut filter = Document::new();
|
||||
if let Some(domain) = domain {
|
||||
filter.insert("domain", domain);
|
||||
}
|
||||
if let Some(username) = username {
|
||||
filter.insert("username", username);
|
||||
}
|
||||
find_collect(
|
||||
&db_client().git_accounts,
|
||||
filter,
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "domain": 1, "username": 1 })
|
||||
.build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for git provider accounts")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerRegistryAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerRegistryAccount { id }: GetDockerRegistryAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Only admins can read docker registry accounts"
|
||||
));
|
||||
}
|
||||
find_one_by_id(&db_client().registry_accounts, &id)
|
||||
.await
|
||||
.context("failed to query db for docker registry accounts")?
|
||||
.context(
|
||||
"did not find docker registry account with the given id",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListDockerRegistryAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListDockerRegistryAccounts { domain, username }: ListDockerRegistryAccounts,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListDockerRegistryAccountsResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Only admins can read docker registry accounts"
|
||||
));
|
||||
}
|
||||
let mut filter = Document::new();
|
||||
if let Some(domain) = domain {
|
||||
filter.insert("domain", domain);
|
||||
}
|
||||
if let Some(username) = username {
|
||||
filter.insert("username", username);
|
||||
}
|
||||
find_collect(
|
||||
&db_client().registry_accounts,
|
||||
filter,
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "domain": 1, "username": 1 })
|
||||
.build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for docker registry accounts")
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
@@ -107,11 +107,16 @@ impl Resolve<GetReposSummary, User> for State {
|
||||
(_, action_states) if action_states.pulling => {
|
||||
res.pulling += 1;
|
||||
}
|
||||
(_, action_states) if action_states.building => {
|
||||
res.building += 1;
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _) | (RepoState::Pulling, _) => {
|
||||
(RepoState::Cloning, _)
|
||||
| (RepoState::Pulling, _)
|
||||
| (RepoState::Building, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
@@ -132,6 +137,7 @@ impl Resolve<GetRepoWebhooksEnabled, User> for State {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
@@ -142,11 +148,14 @@ impl Resolve<GetRepoWebhooksEnabled, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
if repo.config.git_provider != "github.com"
|
||||
|| repo.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -158,6 +167,7 @@ impl Resolve<GetRepoWebhooksEnabled, User> for State {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
@@ -174,32 +184,46 @@ impl Resolve<GetRepoWebhooksEnabled, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let clone_url =
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id);
|
||||
let pull_url =
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id);
|
||||
let build_url =
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id);
|
||||
|
||||
let mut clone_enabled = false;
|
||||
let mut pull_enabled = false;
|
||||
let mut build_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == clone_url {
|
||||
if !webhook.active {
|
||||
continue;
|
||||
}
|
||||
if webhook.config.url == clone_url {
|
||||
clone_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == pull_url {
|
||||
if webhook.config.url == pull_url {
|
||||
pull_enabled = true
|
||||
}
|
||||
if webhook.config.url == build_url {
|
||||
build_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
clone_enabled,
|
||||
pull_enabled,
|
||||
build_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{FindResources, FindResourcesResponse},
|
||||
entities::{
|
||||
build::Build, deployment::Deployment, procedure::Procedure,
|
||||
repo::Repo, server::Server, update::ResourceTargetVariant,
|
||||
user::User,
|
||||
repo::Repo, server::Server, user::User, ResourceTargetVariant,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
cmp,
|
||||
collections::HashMap,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
@@ -7,30 +8,44 @@ use anyhow::{anyhow, Context};
|
||||
use async_timing_util::{
|
||||
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
|
||||
};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
deployment::ContainerSummary,
|
||||
deployment::Deployment,
|
||||
docker::{
|
||||
container::Container,
|
||||
image::{Image, ImageHistoryResponseItem},
|
||||
network::Network,
|
||||
volume::Volume,
|
||||
},
|
||||
permission::PermissionLevel,
|
||||
server::{
|
||||
docker_image::ImageSummary, docker_network::DockerNetwork,
|
||||
Server, ServerActionState, ServerListItem, ServerState,
|
||||
},
|
||||
stack::{Stack, StackServiceNames},
|
||||
update::Log,
|
||||
user::User,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use periphery_client::api::{self, GetAccountsResponse};
|
||||
use periphery_client::api::{
|
||||
self as periphery,
|
||||
container::InspectContainer,
|
||||
image::{ImageHistory, InspectImage},
|
||||
network::InspectNetwork,
|
||||
volume::InspectVolume,
|
||||
};
|
||||
use resolver_api::{Resolve, ResolveToString};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
stack::compose_container_match_regex,
|
||||
state::{action_states, db_client, server_status_cache, State},
|
||||
};
|
||||
|
||||
@@ -192,7 +207,7 @@ impl ResolveToString<GetSystemInformation, User> for State {
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
.request(api::stats::GetSystemInformation {})
|
||||
.request(periphery::stats::GetSystemInformation {})
|
||||
.await?;
|
||||
let res = serde_json::to_string(&stats)?;
|
||||
lock.insert(
|
||||
@@ -240,10 +255,10 @@ fn processes_cache() -> &'static ProcessesCache {
|
||||
PROCESSES_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl ResolveToString<GetSystemProcesses, User> for State {
|
||||
impl ResolveToString<ListSystemProcesses, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
GetSystemProcesses { server }: GetSystemProcesses,
|
||||
ListSystemProcesses { server }: ListSystemProcesses,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
@@ -259,7 +274,7 @@ impl ResolveToString<GetSystemProcesses, User> for State {
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
.request(api::stats::GetSystemProcesses {})
|
||||
.request(periphery::stats::GetSystemProcesses {})
|
||||
.await?;
|
||||
let res = serde_json::to_string(&stats)?;
|
||||
lock.insert(
|
||||
@@ -306,7 +321,7 @@ impl Resolve<GetHistoricalServerStats, User> for State {
|
||||
}
|
||||
|
||||
let stats = find_collect(
|
||||
&db_client().await.stats,
|
||||
&db_client().stats,
|
||||
doc! {
|
||||
"sid": server.id,
|
||||
"ts": { "$in": ts_vec },
|
||||
@@ -329,12 +344,69 @@ impl Resolve<GetHistoricalServerStats, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerImages, User> for State {
|
||||
impl ResolveToString<ListDockerContainers, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListDockerContainers { server }: ListDockerContainers,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(containers) = &cache.containers {
|
||||
serde_json::to_string(containers)
|
||||
.context("failed to serialize response")
|
||||
} else {
|
||||
Ok(String::from("[]"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<InspectDockerContainer, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerImages { server }: GetDockerImages,
|
||||
InspectDockerContainer { server, container }: InspectDockerContainer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ImageSummary>> {
|
||||
) -> anyhow::Result<Container> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"Cannot inspect container: server is {:?}",
|
||||
cache.state
|
||||
));
|
||||
}
|
||||
periphery_client(&server)?
|
||||
.request(InspectContainer { name: container })
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
const MAX_LOG_LENGTH: u64 = 5000;
|
||||
|
||||
impl Resolve<GetContainerLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetContainerLog {
|
||||
server,
|
||||
container,
|
||||
tail,
|
||||
}: GetContainerLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<Log> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
@@ -342,17 +414,27 @@ impl Resolve<GetDockerImages, User> for State {
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::build::GetImageList {})
|
||||
.request(periphery::container::GetContainerLog {
|
||||
name: container,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
})
|
||||
.await
|
||||
.context("failed at call to periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerNetworks, User> for State {
|
||||
impl Resolve<SearchContainerLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerNetworks { server }: GetDockerNetworks,
|
||||
SearchContainerLog {
|
||||
server,
|
||||
container,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
}: SearchContainerLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<DockerNetwork>> {
|
||||
) -> anyhow::Result<Log> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
@@ -360,92 +442,278 @@ impl Resolve<GetDockerNetworks, User> for State {
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::network::GetNetworkList {})
|
||||
.request(periphery::container::GetContainerLogSearch {
|
||||
name: container,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
})
|
||||
.await
|
||||
.context("failed at call to periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerContainers, User> for State {
|
||||
impl Resolve<GetResourceMatchingContainer, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerContainers { server }: GetDockerContainers,
|
||||
GetResourceMatchingContainer { server, container }: GetResourceMatchingContainer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ContainerSummary>> {
|
||||
) -> anyhow::Result<GetResourceMatchingContainerResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::container::GetContainerList {})
|
||||
.await
|
||||
}
|
||||
}
|
||||
// first check deployments
|
||||
if let Ok(deployment) =
|
||||
resource::get::<Deployment>(&container).await
|
||||
{
|
||||
return Ok(GetResourceMatchingContainerResponse {
|
||||
resource: ResourceTarget::Deployment(deployment.id).into(),
|
||||
});
|
||||
}
|
||||
|
||||
impl Resolve<GetAvailableAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAvailableAccounts { server }: GetAvailableAccounts,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAvailableAccountsResponse> {
|
||||
let (github, docker) = match server {
|
||||
Some(server) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// then check stacks
|
||||
let stacks =
|
||||
resource::list_full_for_user_using_document::<Stack>(
|
||||
doc! { "config.server_id": &server.id },
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let GetAccountsResponse { github, docker } =
|
||||
periphery_client(&server)?
|
||||
.request(api::GetAccounts {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
(github, docker)
|
||||
// check matching stack
|
||||
for stack in stacks {
|
||||
for StackServiceNames {
|
||||
service_name,
|
||||
container_name,
|
||||
} in stack
|
||||
.info
|
||||
.deployed_services
|
||||
.unwrap_or(stack.info.latest_services)
|
||||
{
|
||||
let is_match = match compose_container_match_regex(&container_name)
|
||||
.with_context(|| format!("failed to construct container name matching regex for service {service_name}"))
|
||||
{
|
||||
Ok(regex) => regex,
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
continue;
|
||||
}
|
||||
}.is_match(&container);
|
||||
|
||||
if is_match {
|
||||
return Ok(GetResourceMatchingContainerResponse {
|
||||
resource: ResourceTarget::Stack(stack.id).into(),
|
||||
});
|
||||
}
|
||||
}
|
||||
None => Default::default(),
|
||||
};
|
||||
}
|
||||
|
||||
let mut github_set = HashSet::<String>::new();
|
||||
|
||||
github_set.extend(core_config().github_accounts.keys().cloned());
|
||||
github_set.extend(github);
|
||||
|
||||
let mut github = github_set.into_iter().collect::<Vec<_>>();
|
||||
github.sort();
|
||||
|
||||
let mut docker_set = HashSet::<String>::new();
|
||||
|
||||
docker_set.extend(core_config().docker_accounts.keys().cloned());
|
||||
docker_set.extend(docker);
|
||||
|
||||
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
|
||||
docker.sort();
|
||||
|
||||
let res = GetAvailableAccountsResponse { github, docker };
|
||||
Ok(res)
|
||||
Ok(GetResourceMatchingContainerResponse { resource: None })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetAvailableSecrets, User> for State {
|
||||
async fn resolve(
|
||||
impl ResolveToString<ListDockerNetworks, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
GetAvailableSecrets { server }: GetAvailableSecrets,
|
||||
ListDockerNetworks { server }: ListDockerNetworks,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAvailableSecretsResponse> {
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let mut secrets = periphery_client(&server)?
|
||||
.request(api::GetSecrets {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
secrets.sort();
|
||||
Ok(secrets)
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(networks) = &cache.networks {
|
||||
serde_json::to_string(networks)
|
||||
.context("failed to serialize response")
|
||||
} else {
|
||||
Ok(String::from("[]"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<InspectDockerNetwork, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
InspectDockerNetwork { server, network }: InspectDockerNetwork,
|
||||
user: User,
|
||||
) -> anyhow::Result<Network> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"Cannot inspect network: server is {:?}",
|
||||
cache.state
|
||||
));
|
||||
}
|
||||
periphery_client(&server)?
|
||||
.request(InspectNetwork { name: network })
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl ResolveToString<ListDockerImages, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListDockerImages { server }: ListDockerImages,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(images) = &cache.images {
|
||||
serde_json::to_string(images)
|
||||
.context("failed to serialize response")
|
||||
} else {
|
||||
Ok(String::from("[]"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<InspectDockerImage, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
InspectDockerImage { server, image }: InspectDockerImage,
|
||||
user: User,
|
||||
) -> anyhow::Result<Image> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"Cannot inspect image: server is {:?}",
|
||||
cache.state
|
||||
));
|
||||
}
|
||||
periphery_client(&server)?
|
||||
.request(InspectImage { name: image })
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListDockerImageHistory, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListDockerImageHistory { server, image }: ListDockerImageHistory,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ImageHistoryResponseItem>> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"Cannot get image history: server is {:?}",
|
||||
cache.state
|
||||
));
|
||||
}
|
||||
periphery_client(&server)?
|
||||
.request(ImageHistory { name: image })
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl ResolveToString<ListDockerVolumes, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListDockerVolumes { server }: ListDockerVolumes,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(volumes) = &cache.volumes {
|
||||
serde_json::to_string(volumes)
|
||||
.context("failed to serialize response")
|
||||
} else {
|
||||
Ok(String::from("[]"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<InspectDockerVolume, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
InspectDockerVolume { server, volume }: InspectDockerVolume,
|
||||
user: User,
|
||||
) -> anyhow::Result<Volume> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"Cannot inspect volume: server is {:?}",
|
||||
cache.state
|
||||
));
|
||||
}
|
||||
periphery_client(&server)?
|
||||
.request(InspectVolume { name: volume })
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl ResolveToString<ListComposeProjects, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListComposeProjects { server }: ListComposeProjects,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(projects) = &cache.projects {
|
||||
serde_json::to_string(projects)
|
||||
.context("failed to serialize response")
|
||||
} else {
|
||||
Ok(String::from("[]"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,16 @@
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
update::ResourceTargetVariant, user::User,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_user,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
@@ -57,10 +56,9 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
|
||||
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
|
||||
let query = match get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::ServerTemplate,
|
||||
)
|
||||
let query = match resource::get_resource_ids_for_user::<
|
||||
ServerTemplate,
|
||||
>(&user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
@@ -69,7 +67,6 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
|
||||
None => Document::new(),
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.server_templates
|
||||
.count_documents(query)
|
||||
.await
|
||||
|
||||
338
bin/core/src/api/read/stack.rs
Normal file
338
bin/core/src/api/read/stack.rs
Normal file
@@ -0,0 +1,338 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
stack::{Stack, StackActionState, StackListItem, StackState},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeServiceLog, GetComposeServiceLogSearch,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
stack::get_stack_and_server,
|
||||
state::{action_states, github_client, stack_status_cache, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetStack, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetStack { stack }: GetStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListStackServices, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListStackServices { stack }: ListStackServices,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListStackServicesResponse> {
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let services = stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.curr
|
||||
.services
|
||||
.clone();
|
||||
|
||||
Ok(services)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetStackServiceLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetStackServiceLog {
|
||||
stack,
|
||||
service,
|
||||
tail,
|
||||
}: GetStackServiceLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetStackServiceLogResponse> {
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(GetComposeServiceLog {
|
||||
project: stack.project_name(false),
|
||||
service,
|
||||
tail,
|
||||
})
|
||||
.await
|
||||
.context("failed to get stack service log from periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<SearchStackServiceLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
SearchStackServiceLog {
|
||||
stack,
|
||||
service,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
}: SearchStackServiceLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<SearchStackServiceLogResponse> {
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(GetComposeServiceLogSearch {
|
||||
project: stack.project_name(false),
|
||||
service,
|
||||
terms,
|
||||
combinator,
|
||||
invert,
|
||||
})
|
||||
.await
|
||||
.context("failed to get stack service log from periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListCommonStackExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListCommonStackExtraArgs { query }: ListCommonStackExtraArgs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListCommonStackExtraArgsResponse> {
|
||||
let stacks = resource::list_full_for_user::<Stack>(query, &user)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
|
||||
// first collect with guaranteed uniqueness
|
||||
let mut res = HashSet::<String>::new();
|
||||
|
||||
for stack in stacks {
|
||||
for extra_arg in stack.config.extra_args {
|
||||
res.insert(extra_arg);
|
||||
}
|
||||
}
|
||||
|
||||
let mut res = res.into_iter().collect::<Vec<_>>();
|
||||
res.sort();
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListCommonStackBuildExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListCommonStackBuildExtraArgs { query }: ListCommonStackBuildExtraArgs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListCommonStackBuildExtraArgsResponse> {
|
||||
let stacks = resource::list_full_for_user::<Stack>(query, &user)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
|
||||
// first collect with guaranteed uniqueness
|
||||
let mut res = HashSet::<String>::new();
|
||||
|
||||
for stack in stacks {
|
||||
for extra_arg in stack.config.build_extra_args {
|
||||
res.insert(extra_arg);
|
||||
}
|
||||
}
|
||||
|
||||
let mut res = res.into_iter().collect::<Vec<_>>();
|
||||
res.sort();
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListStacks, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListStacks { query }: ListStacks,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<StackListItem>> {
|
||||
resource::list_for_user::<Stack>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListFullStacks, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullStacks { query }: ListFullStacks,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullStacksResponse> {
|
||||
resource::list_full_for_user::<Stack>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetStackActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetStackActionState { stack }: GetStackActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<StackActionState> {
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.stack
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetStacksSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetStacksSummary {}: GetStacksSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetStacksSummaryResponse> {
|
||||
let stacks = resource::list_full_for_user::<Stack>(
|
||||
Default::default(),
|
||||
&user,
|
||||
)
|
||||
.await
|
||||
.context("failed to get stacks from db")?;
|
||||
|
||||
let mut res = GetStacksSummaryResponse::default();
|
||||
|
||||
let cache = stack_status_cache();
|
||||
|
||||
for stack in stacks {
|
||||
res.total += 1;
|
||||
match cache.get(&stack.id).await.unwrap_or_default().curr.state
|
||||
{
|
||||
StackState::Running => res.running += 1,
|
||||
StackState::Stopped | StackState::Paused => res.stopped += 1,
|
||||
StackState::Down => res.down += 1,
|
||||
StackState::Unknown => res.unknown += 1,
|
||||
_ => res.unhealthy += 1,
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetStackWebhooksEnabled, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetStackWebhooksEnabled { stack }: GetStackWebhooksEnabled,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetStackWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com"
|
||||
|| stack.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id);
|
||||
let deploy_url =
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut deploy_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == deploy_url {
|
||||
deploy_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
deploy_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,12 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
sync::{
|
||||
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
|
||||
ResourceSyncListItem, ResourceSyncState,
|
||||
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
|
||||
ResourceSyncState,
|
||||
},
|
||||
user::User,
|
||||
},
|
||||
@@ -100,17 +100,18 @@ impl Resolve<GetResourceSyncsSummary, User> for State {
|
||||
for resource_sync in resource_syncs {
|
||||
res.total += 1;
|
||||
|
||||
match resource_sync.info.pending.data {
|
||||
PendingSyncUpdatesData::Ok(data) => {
|
||||
if !data.no_updates() {
|
||||
res.pending += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
PendingSyncUpdatesData::Err(_) => {
|
||||
res.failed += 1;
|
||||
continue;
|
||||
}
|
||||
if !(resource_sync.info.pending_deploy.to_deploy == 0
|
||||
&& resource_sync.info.resource_updates.is_empty()
|
||||
&& resource_sync.info.variable_updates.is_empty()
|
||||
&& resource_sync.info.user_group_updates.is_empty())
|
||||
{
|
||||
res.pending += 1;
|
||||
continue;
|
||||
} else if resource_sync.info.pending_error.is_some()
|
||||
|| !resource_sync.info.remote_errors.is_empty()
|
||||
{
|
||||
res.failed += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
match (
|
||||
@@ -163,7 +164,9 @@ impl Resolve<GetSyncWebhooksEnabled, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
if sync.config.git_provider != "github.com"
|
||||
|| sync.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
@@ -195,11 +198,15 @@ impl Resolve<GetSyncWebhooksEnabled, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id);
|
||||
let sync_url =
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{GetTag, ListTags},
|
||||
entities::{tag::Tag, user::User},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -29,7 +29,7 @@ impl Resolve<ListTags, User> for State {
|
||||
_: User,
|
||||
) -> anyhow::Result<Vec<Tag>> {
|
||||
find_collect(
|
||||
&db_client().await.tags,
|
||||
&db_client().tags,
|
||||
query,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
|
||||
@@ -1,48 +1,41 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
execute::Execution,
|
||||
read::{
|
||||
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
|
||||
ExportResourcesToToml, ExportResourcesToTomlResponse,
|
||||
GetUserGroup, ListUserTargetPermissions,
|
||||
},
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
|
||||
ExportResourcesToToml, ExportResourcesToTomlResponse,
|
||||
GetUserGroup, ListUserTargetPermissions,
|
||||
},
|
||||
entities::{
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::{
|
||||
conversions_to_string, term_signal_labels_to_string,
|
||||
Deployment, DeploymentImage,
|
||||
},
|
||||
environment_vars_to_string,
|
||||
builder::Builder,
|
||||
deployment::Deployment,
|
||||
permission::{PermissionLevel, UserTarget},
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
resource::{Resource, ResourceQuery},
|
||||
resource::ResourceQuery,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
toml::{
|
||||
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
|
||||
},
|
||||
update::ResourceTarget,
|
||||
toml::{PermissionToml, ResourcesToml, UserGroupToml},
|
||||
user::User,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use ordered_hash_map::OrderedHashMap;
|
||||
use partial_derive2::PartialDiff;
|
||||
use resolver_api::Resolve;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user_user_group_ids,
|
||||
resource::{self, MonitorResource},
|
||||
helpers::query::{get_id_to_tags, get_user_user_group_ids},
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
sync::{
|
||||
toml::{convert_resource, ToToml, TOML_PRETTY_OPTIONS},
|
||||
AllResourcesById,
|
||||
},
|
||||
};
|
||||
|
||||
impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
@@ -89,6 +82,15 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Deployment(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Stack>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Stack(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Build>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
@@ -126,17 +128,18 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<ResourceSync>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
// These will already be filtered by [ExportResourcesToToml]
|
||||
.map(|resource| ResourceTarget::ResourceSync(resource.id)),
|
||||
);
|
||||
|
||||
let user_groups = if user.admin {
|
||||
find_collect(&db_client().await.user_groups, None, None)
|
||||
let user_groups = if user.admin && tags.is_empty() {
|
||||
find_collect(&db_client().user_groups, None, None)
|
||||
.await
|
||||
.context("failed to query db for user groups")?
|
||||
.into_iter()
|
||||
@@ -151,7 +154,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
ExportResourcesToToml {
|
||||
targets,
|
||||
user_groups,
|
||||
include_variables: true,
|
||||
include_variables: tags.is_empty(),
|
||||
},
|
||||
user,
|
||||
)
|
||||
@@ -170,9 +173,8 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
user: User,
|
||||
) -> anyhow::Result<ExportResourcesToTomlResponse> {
|
||||
let mut res = ResourcesToml::default();
|
||||
let names = ResourceNames::new()
|
||||
.await
|
||||
.context("failed to init resource name maps")?;
|
||||
let all = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
for target in targets {
|
||||
match target {
|
||||
ResourceTarget::Alerter(id) => {
|
||||
@@ -184,7 +186,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
.await?;
|
||||
res
|
||||
.alerters
|
||||
.push(convert_resource::<Alerter>(alerter, &names.tags))
|
||||
.push(convert_resource::<Alerter>(alerter, &id_to_tags))
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
@@ -193,9 +195,15 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
res
|
||||
.resource_syncs
|
||||
.push(convert_resource::<ResourceSync>(sync, &names.tags))
|
||||
if sync.config.file_contents.is_empty()
|
||||
&& (sync.config.files_on_host
|
||||
|| !sync.config.repo.is_empty())
|
||||
{
|
||||
res.resource_syncs.push(convert_resource::<ResourceSync>(
|
||||
sync,
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
let template = resource::get_check_permissions::<
|
||||
@@ -205,7 +213,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
)
|
||||
.await?;
|
||||
res.server_templates.push(
|
||||
convert_resource::<ServerTemplate>(template, &names.tags),
|
||||
convert_resource::<ServerTemplate>(template, &id_to_tags),
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
@@ -217,7 +225,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
.await?;
|
||||
res
|
||||
.servers
|
||||
.push(convert_resource::<Server>(server, &names.tags))
|
||||
.push(convert_resource::<Server>(server, &id_to_tags))
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder =
|
||||
@@ -227,18 +235,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// replace server id of builder
|
||||
if let BuilderConfig::Server(config) = &mut builder.config {
|
||||
config.server_id.clone_from(
|
||||
names
|
||||
.servers
|
||||
.get(&config.server_id)
|
||||
.unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
Builder::replace_ids(&mut builder, &all);
|
||||
res
|
||||
.builders
|
||||
.push(convert_resource::<Builder>(builder, &names.tags))
|
||||
.push(convert_resource::<Builder>(builder, &id_to_tags))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
@@ -247,16 +247,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// replace builder id of build
|
||||
build.config.builder_id.clone_from(
|
||||
names
|
||||
.builders
|
||||
.get(&build.config.builder_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
Build::replace_ids(&mut build, &all);
|
||||
res
|
||||
.builds
|
||||
.push(convert_resource::<Build>(build, &names.tags))
|
||||
.push(convert_resource::<Build>(build, &id_to_tags))
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
let mut deployment = resource::get_check_permissions::<
|
||||
@@ -265,24 +259,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
// replace deployment server with name
|
||||
deployment.config.server_id.clone_from(
|
||||
names
|
||||
.servers
|
||||
.get(&deployment.config.server_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
// replace deployment build id with name
|
||||
if let DeploymentImage::Build { build_id, .. } =
|
||||
&mut deployment.config.image
|
||||
{
|
||||
build_id.clone_from(
|
||||
names.builds.get(build_id).unwrap_or(&String::new()),
|
||||
);
|
||||
}
|
||||
Deployment::replace_ids(&mut deployment, &all);
|
||||
res.deployments.push(convert_resource::<Deployment>(
|
||||
deployment,
|
||||
&names.tags,
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
@@ -292,217 +272,71 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// replace repo server with name
|
||||
repo.config.server_id.clone_from(
|
||||
names
|
||||
.servers
|
||||
.get(&repo.config.server_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
res.repos.push(convert_resource::<Repo>(repo, &names.tags))
|
||||
Repo::replace_ids(&mut repo, &all);
|
||||
res.repos.push(convert_resource::<Repo>(repo, &id_to_tags))
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
let mut stack = resource::get_check_permissions::<Stack>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Stack::replace_ids(&mut stack, &all);
|
||||
res
|
||||
.stacks
|
||||
.push(convert_resource::<Stack>(stack, &id_to_tags))
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
add_procedure(&id, &mut res, &user, &names)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to add procedure {id}")
|
||||
})?;
|
||||
let mut procedure = resource::get_check_permissions::<
|
||||
Procedure,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
Procedure::replace_ids(&mut procedure, &all);
|
||||
res.procedures.push(convert_resource::<Procedure>(
|
||||
procedure,
|
||||
&id_to_tags,
|
||||
));
|
||||
}
|
||||
ResourceTarget::System(_) => continue,
|
||||
};
|
||||
}
|
||||
|
||||
add_user_groups(user_groups, &mut res, &names, &user)
|
||||
add_user_groups(user_groups, &mut res, &all, &user)
|
||||
.await
|
||||
.context("failed to add user groups")?;
|
||||
|
||||
if include_variables {
|
||||
res.variables =
|
||||
find_collect(&db_client().await.variables, None, None)
|
||||
find_collect(&db_client().variables, None, None)
|
||||
.await
|
||||
.context("failed to get variables from db")?;
|
||||
.context("failed to get variables from db")?
|
||||
.into_iter()
|
||||
.map(|mut variable| {
|
||||
if !user.admin && variable.is_secret {
|
||||
variable.value = "#".repeat(variable.value.len())
|
||||
}
|
||||
variable
|
||||
})
|
||||
.collect();
|
||||
}
|
||||
|
||||
let toml = serialize_resources_toml(&res)
|
||||
let toml = serialize_resources_toml(res)
|
||||
.context("failed to serialize resources to toml")?;
|
||||
|
||||
Ok(ExportResourcesToTomlResponse { toml })
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_procedure(
|
||||
id: &str,
|
||||
res: &mut ResourcesToml,
|
||||
user: &User,
|
||||
names: &ResourceNames,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut procedure = resource::get_check_permissions::<Procedure>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
for stage in &mut procedure.config.stages {
|
||||
for execution in &mut stage.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::RunProcedure(exec) => exec.procedure.clone_from(
|
||||
names
|
||||
.procedures
|
||||
.get(&exec.procedure)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunBuild(exec) => exec.build.clone_from(
|
||||
names.builds.get(&exec.build).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::Deploy(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StartContainer(exec) => {
|
||||
exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
Execution::StopContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RemoveContainer(exec) => {
|
||||
exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
Execution::CloneRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PullRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StopAllContainers(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneNetworks(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneImages(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneContainers(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunSync(exec) => exec.sync.clone_from(
|
||||
names.syncs.get(&exec.sync).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::Sleep(_) | Execution::None(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res
|
||||
.procedures
|
||||
.push(convert_resource::<Procedure>(procedure, &names.tags));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
struct ResourceNames {
|
||||
tags: HashMap<String, String>,
|
||||
servers: HashMap<String, String>,
|
||||
builders: HashMap<String, String>,
|
||||
builds: HashMap<String, String>,
|
||||
repos: HashMap<String, String>,
|
||||
deployments: HashMap<String, String>,
|
||||
procedures: HashMap<String, String>,
|
||||
syncs: HashMap<String, String>,
|
||||
alerters: HashMap<String, String>,
|
||||
templates: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl ResourceNames {
|
||||
async fn new() -> anyhow::Result<ResourceNames> {
|
||||
let db = db_client().await;
|
||||
Ok(ResourceNames {
|
||||
tags: find_collect(&db.tags, None, None)
|
||||
.await
|
||||
.context("failed to get all tags")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
servers: find_collect(&db.servers, None, None)
|
||||
.await
|
||||
.context("failed to get all servers")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
builders: find_collect(&db.builders, None, None)
|
||||
.await
|
||||
.context("failed to get all builders")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
builds: find_collect(&db.builds, None, None)
|
||||
.await
|
||||
.context("failed to get all builds")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
repos: find_collect(&db.repos, None, None)
|
||||
.await
|
||||
.context("failed to get all repos")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
deployments: find_collect(&db.deployments, None, None)
|
||||
.await
|
||||
.context("failed to get all deployments")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
procedures: find_collect(&db.procedures, None, None)
|
||||
.await
|
||||
.context("failed to get all procedures")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
syncs: find_collect(&db.resource_syncs, None, None)
|
||||
.await
|
||||
.context("failed to get all resource syncs")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
alerters: find_collect(&db.alerters, None, None)
|
||||
.await
|
||||
.context("failed to get all alerters")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
templates: find_collect(&db.server_templates, None, None)
|
||||
.await
|
||||
.context("failed to get all server templates")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_user_groups(
|
||||
user_groups: Vec<String>,
|
||||
res: &mut ResourcesToml,
|
||||
names: &ResourceNames,
|
||||
all: &AllResourcesById,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
let usernames = find_collect(&db.users, None, None)
|
||||
.await?
|
||||
@@ -530,33 +364,74 @@ async fn add_user_groups(
|
||||
.map(|mut permission| {
|
||||
match &mut permission.resource_target {
|
||||
ResourceTarget::Build(id) => {
|
||||
*id = names.builds.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.builds
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
*id = names.builders.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.builders
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
*id =
|
||||
names.deployments.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.deployments
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
*id = names.servers.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.servers
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
*id = names.repos.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.repos
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
*id = names.alerters.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.alerters
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
*id =
|
||||
names.procedures.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.procedures
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
*id = names.templates.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.templates
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
*id = names.syncs.get(id).cloned().unwrap_or_default()
|
||||
*id = all
|
||||
.syncs
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
*id = all
|
||||
.stacks
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::System(_) => {}
|
||||
}
|
||||
@@ -580,263 +455,112 @@ async fn add_user_groups(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn convert_resource<R: MonitorResource>(
|
||||
resource: Resource<R::Config, R::Info>,
|
||||
tag_names: &HashMap<String, String>,
|
||||
) -> ResourceToml<R::PartialConfig> {
|
||||
// This makes sure all non-necessary (defaulted) fields don't make it into final toml
|
||||
let partial: R::PartialConfig = resource.config.into();
|
||||
let config = R::Config::default().minimize_partial(partial);
|
||||
ResourceToml {
|
||||
name: resource.name,
|
||||
tags: resource
|
||||
.tags
|
||||
.iter()
|
||||
.filter_map(|t| tag_names.get(t).cloned())
|
||||
.collect(),
|
||||
description: resource.description,
|
||||
deploy: false,
|
||||
after: Default::default(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
fn serialize_resources_toml(
|
||||
resources: &ResourcesToml,
|
||||
resources: ResourcesToml,
|
||||
) -> anyhow::Result<String> {
|
||||
let mut res = String::new();
|
||||
let mut toml = String::new();
|
||||
|
||||
let options = toml_pretty::Options::default()
|
||||
.tab(" ")
|
||||
.skip_empty_string(true)
|
||||
.max_inline_array_length(30);
|
||||
|
||||
for server in &resources.servers {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for server in resources.servers {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[server]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&server, options)
|
||||
.context("failed to serialize servers to toml")?,
|
||||
);
|
||||
toml.push_str("[[server]]\n");
|
||||
Server::push_to_toml_string(server, &mut toml)?;
|
||||
}
|
||||
|
||||
for deployment in &resources.deployments {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for stack in resources.stacks {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[deployment]]\n");
|
||||
let mut parsed: OrderedHashMap<String, Value> =
|
||||
serde_json::from_str(&serde_json::to_string(&deployment)?)?;
|
||||
let config = parsed
|
||||
.get_mut("config")
|
||||
.context("deployment has no config?")?
|
||||
.as_object_mut()
|
||||
.context("config is not object?")?;
|
||||
if let Some(DeploymentImage::Build { version, .. }) =
|
||||
&deployment.config.image
|
||||
{
|
||||
let image = config
|
||||
.get_mut("image")
|
||||
.context("deployment has no image")?
|
||||
.get_mut("params")
|
||||
.context("deployment image has no params")?
|
||||
.as_object_mut()
|
||||
.context("deployment image params is not object")?;
|
||||
if version.is_none() {
|
||||
image.remove("version");
|
||||
} else {
|
||||
image.insert(
|
||||
"version".to_string(),
|
||||
Value::String(version.to_string()),
|
||||
);
|
||||
}
|
||||
}
|
||||
if let Some(term_signal_labels) =
|
||||
&deployment.config.term_signal_labels
|
||||
{
|
||||
config.insert(
|
||||
"term_signal_labels".to_string(),
|
||||
Value::String(term_signal_labels_to_string(
|
||||
term_signal_labels,
|
||||
)),
|
||||
);
|
||||
}
|
||||
if let Some(ports) = &deployment.config.ports {
|
||||
config.insert(
|
||||
"ports".to_string(),
|
||||
Value::String(conversions_to_string(ports)),
|
||||
);
|
||||
}
|
||||
if let Some(volumes) = &deployment.config.volumes {
|
||||
config.insert(
|
||||
"volumes".to_string(),
|
||||
Value::String(conversions_to_string(volumes)),
|
||||
);
|
||||
}
|
||||
if let Some(environment) = &deployment.config.environment {
|
||||
config.insert(
|
||||
"environment".to_string(),
|
||||
Value::String(environment_vars_to_string(environment)),
|
||||
);
|
||||
}
|
||||
if let Some(labels) = &deployment.config.labels {
|
||||
config.insert(
|
||||
"labels".to_string(),
|
||||
Value::String(environment_vars_to_string(labels)),
|
||||
);
|
||||
}
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&parsed, options)
|
||||
.context("failed to serialize deployments to toml")?,
|
||||
);
|
||||
toml.push_str("[[stack]]\n");
|
||||
Stack::push_to_toml_string(stack, &mut toml)?;
|
||||
}
|
||||
|
||||
for build in &resources.builds {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for deployment in resources.deployments {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
let mut parsed: OrderedHashMap<String, Value> =
|
||||
serde_json::from_str(&serde_json::to_string(&build)?)?;
|
||||
let config = parsed
|
||||
.get_mut("config")
|
||||
.context("build has no config?")?
|
||||
.as_object_mut()
|
||||
.context("config is not object?")?;
|
||||
if let Some(version) = &build.config.version {
|
||||
config.insert(
|
||||
"version".to_string(),
|
||||
Value::String(version.to_string()),
|
||||
);
|
||||
}
|
||||
if let Some(build_args) = &build.config.build_args {
|
||||
config.insert(
|
||||
"build_args".to_string(),
|
||||
Value::String(environment_vars_to_string(build_args)),
|
||||
);
|
||||
}
|
||||
if let Some(labels) = &build.config.labels {
|
||||
config.insert(
|
||||
"labels".to_string(),
|
||||
Value::String(environment_vars_to_string(labels)),
|
||||
);
|
||||
}
|
||||
res.push_str("[[build]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&parsed, options)
|
||||
.context("failed to serialize builds to toml")?,
|
||||
);
|
||||
toml.push_str("[[deployment]]\n");
|
||||
Deployment::push_to_toml_string(deployment, &mut toml)?;
|
||||
}
|
||||
|
||||
for repo in &resources.repos {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for build in resources.builds {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[repo]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&repo, options)
|
||||
.context("failed to serialize repos to toml")?,
|
||||
);
|
||||
toml.push_str("[[build]]\n");
|
||||
Build::push_to_toml_string(build, &mut toml)?;
|
||||
}
|
||||
|
||||
for procedure in &resources.procedures {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
}
|
||||
let mut parsed: OrderedHashMap<String, Value> =
|
||||
serde_json::from_str(&serde_json::to_string(&procedure)?)?;
|
||||
let config = parsed
|
||||
.get_mut("config")
|
||||
.context("procedure has no config?")?
|
||||
.as_object_mut()
|
||||
.context("config is not object?")?;
|
||||
|
||||
let stages = config
|
||||
.remove("stages")
|
||||
.context("procedure config has no stages")?;
|
||||
let stages = stages.as_array().context("stages is not array")?;
|
||||
|
||||
res.push_str("[[procedure]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&parsed, options)
|
||||
.context("failed to serialize procedures to toml")?,
|
||||
);
|
||||
|
||||
for stage in stages {
|
||||
res.push_str("\n\n[[procedure.config.stage]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(stage, options)
|
||||
.context("failed to serialize procedures to toml")?,
|
||||
);
|
||||
for repo in resources.repos {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[repo]]\n");
|
||||
Repo::push_to_toml_string(repo, &mut toml)?;
|
||||
}
|
||||
|
||||
for alerter in &resources.alerters {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for procedure in resources.procedures {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[alerter]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&alerter, options)
|
||||
.context("failed to serialize alerters to toml")?,
|
||||
);
|
||||
toml.push_str("[[procedure]]\n");
|
||||
Procedure::push_to_toml_string(procedure, &mut toml)?;
|
||||
}
|
||||
|
||||
for builder in &resources.builders {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for alerter in resources.alerters {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[builder]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&builder, options)
|
||||
.context("failed to serialize builders to toml")?,
|
||||
);
|
||||
toml.push_str("[[alerter]]\n");
|
||||
Alerter::push_to_toml_string(alerter, &mut toml)?;
|
||||
}
|
||||
|
||||
for server_template in &resources.server_templates {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for builder in resources.builders {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[server_template]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&server_template, options)
|
||||
.context("failed to serialize server_templates to toml")?,
|
||||
);
|
||||
toml.push_str("[[builder]]\n");
|
||||
Builder::push_to_toml_string(builder, &mut toml)?;
|
||||
}
|
||||
|
||||
for resource_sync in &resources.resource_syncs {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
for server_template in resources.server_templates {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[resource_sync]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&resource_sync, options)
|
||||
.context("failed to serialize resource_syncs to toml")?,
|
||||
);
|
||||
toml.push_str("[[server_template]]\n");
|
||||
ServerTemplate::push_to_toml_string(server_template, &mut toml)?;
|
||||
}
|
||||
|
||||
for resource_sync in resources.resource_syncs {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[resource_sync]]\n");
|
||||
ResourceSync::push_to_toml_string(resource_sync, &mut toml)?;
|
||||
}
|
||||
|
||||
for variable in &resources.variables {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[variable]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&variable, options)
|
||||
toml.push_str("[[variable]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize variables to toml")?,
|
||||
);
|
||||
}
|
||||
|
||||
for user_group in &resources.user_groups {
|
||||
if !res.is_empty() {
|
||||
res.push_str("\n\n##\n\n");
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
res.push_str("[[user_group]]\n");
|
||||
res.push_str(
|
||||
&toml_pretty::to_string(&user_group, options)
|
||||
toml.push_str("[[user_group]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(user_group, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize user_groups to toml")?,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
Ok(toml)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
|
||||
entities::{
|
||||
alerter::Alerter,
|
||||
@@ -13,11 +13,11 @@ use monitor_client::{
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{
|
||||
ResourceTarget, ResourceTargetVariant, Update, UpdateListItem,
|
||||
},
|
||||
update::{Update, UpdateListItem},
|
||||
user::User,
|
||||
ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
@@ -29,7 +29,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_user,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
@@ -45,44 +44,48 @@ impl Resolve<ListUpdates, User> for State {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
query
|
||||
} else {
|
||||
let server_query = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Server,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
let server_query =
|
||||
resource::get_resource_ids_for_user::<Server>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
|
||||
let deployment_query = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Deployment,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Deployment", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
let deployment_query =
|
||||
resource::get_resource_ids_for_user::<Deployment>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Deployment", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
|
||||
let build_query = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Build,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
let stack_query =
|
||||
resource::get_resource_ids_for_user::<Stack>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
|
||||
let build_query =
|
||||
resource::get_resource_ids_for_user::<Build>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
|
||||
let repo_query =
|
||||
get_resource_ids_for_user(&user, ResourceTargetVariant::Repo)
|
||||
resource::get_resource_ids_for_user::<Repo>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
@@ -91,45 +94,38 @@ impl Resolve<ListUpdates, User> for State {
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
|
||||
let procedure_query = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Procedure,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Procedure", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
let procedure_query =
|
||||
resource::get_resource_ids_for_user::<Procedure>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Procedure", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
|
||||
let builder_query = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Builder,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
let builder_query =
|
||||
resource::get_resource_ids_for_user::<Builder>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
|
||||
let alerter_query = get_resource_ids_for_user(
|
||||
&user,
|
||||
ResourceTargetVariant::Alerter,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
let alerter_query =
|
||||
resource::get_resource_ids_for_user::<Alerter>(&user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let server_template_query = get_resource_ids_for_user(
|
||||
let server_template_query = resource::get_resource_ids_for_user::<ServerTemplate>(
|
||||
&user,
|
||||
ResourceTargetVariant::ServerTemplate,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
@@ -139,9 +135,8 @@ impl Resolve<ListUpdates, User> for State {
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
|
||||
|
||||
let resource_sync_query = get_resource_ids_for_user(
|
||||
let resource_sync_query = resource::get_resource_ids_for_user::<ResourceSync>(
|
||||
&user,
|
||||
ResourceTargetVariant::ResourceSync,
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
@@ -155,8 +150,9 @@ impl Resolve<ListUpdates, User> for State {
|
||||
query.extend(doc! {
|
||||
"$or": [
|
||||
server_query,
|
||||
build_query,
|
||||
deployment_query,
|
||||
stack_query,
|
||||
build_query,
|
||||
repo_query,
|
||||
procedure_query,
|
||||
alerter_query,
|
||||
@@ -168,16 +164,15 @@ impl Resolve<ListUpdates, User> for State {
|
||||
query.into()
|
||||
};
|
||||
|
||||
let usernames =
|
||||
find_collect(&db_client().await.users, None, None)
|
||||
.await
|
||||
.context("failed to pull users from db")?
|
||||
.into_iter()
|
||||
.map(|u| (u.id, u.username))
|
||||
.collect::<HashMap<_, _>>();
|
||||
let usernames = find_collect(&db_client().users, None, None)
|
||||
.await
|
||||
.context("failed to pull users from db")?
|
||||
.into_iter()
|
||||
.map(|u| (u.id, u.username))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let updates = find_collect(
|
||||
&db_client().await.updates,
|
||||
&db_client().updates,
|
||||
query,
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "start_ts": -1 })
|
||||
@@ -228,7 +223,7 @@ impl Resolve<GetUpdate, User> for State {
|
||||
GetUpdate { id }: GetUpdate,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let update = find_one_by_id(&db_client().await.updates, &id)
|
||||
let update = find_one_by_id(&db_client().updates, &id)
|
||||
.await
|
||||
.context("failed to query to db")?
|
||||
.context("no update exists with given id")?;
|
||||
@@ -313,6 +308,14 @@ impl Resolve<GetUpdate, User> for State {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::get_check_permissions::<Stack>(
|
||||
id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
|
||||
ListApiKeys, ListApiKeysForServiceUser,
|
||||
@@ -26,7 +26,7 @@ impl Resolve<GetUsername, User> for State {
|
||||
GetUsername { user_id }: GetUsername,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetUsernameResponse> {
|
||||
let user = find_one_by_id(&db_client().await.users, &user_id)
|
||||
let user = find_one_by_id(&db_client().users, &user_id)
|
||||
.await
|
||||
.context("failed at mongo query for user")?
|
||||
.context("no user found with id")?;
|
||||
@@ -67,7 +67,7 @@ impl Resolve<ListUsers, User> for State {
|
||||
return Err(anyhow!("this route is only accessable by admins"));
|
||||
}
|
||||
let mut users = find_collect(
|
||||
&db_client().await.users,
|
||||
&db_client().users,
|
||||
None,
|
||||
FindOptions::builder().sort(doc! { "username": 1 }).build(),
|
||||
)
|
||||
@@ -85,7 +85,7 @@ impl Resolve<ListApiKeys, User> for State {
|
||||
user: User,
|
||||
) -> anyhow::Result<ListApiKeysResponse> {
|
||||
let api_keys = find_collect(
|
||||
&db_client().await.api_keys,
|
||||
&db_client().api_keys,
|
||||
doc! { "user_id": &user.id },
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
@@ -117,7 +117,7 @@ impl Resolve<ListApiKeysForServiceUser, User> for State {
|
||||
return Err(anyhow!("Given user is not service user"));
|
||||
};
|
||||
let api_keys = find_collect(
|
||||
&db_client().await.api_keys,
|
||||
&db_client().api_keys,
|
||||
doc! { "user_id": &user.id },
|
||||
None,
|
||||
)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetUserGroup, GetUserGroupResponse, ListUserGroups,
|
||||
ListUserGroupsResponse,
|
||||
@@ -35,7 +35,6 @@ impl Resolve<GetUserGroup, User> for State {
|
||||
filter.insert("users", &user.id);
|
||||
}
|
||||
db_client()
|
||||
.await
|
||||
.user_groups
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -55,7 +54,7 @@ impl Resolve<ListUserGroups, User> for State {
|
||||
filter.insert("users", &user.id);
|
||||
}
|
||||
find_collect(
|
||||
&db_client().await.user_groups,
|
||||
&db_client().user_groups,
|
||||
filter,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
|
||||
@@ -1,17 +1,16 @@
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetVariable, GetVariableResponse, ListVariables,
|
||||
ListVariablesResponse,
|
||||
},
|
||||
entities::user::User,
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_variable,
|
||||
state::{db_client, State},
|
||||
};
|
||||
@@ -20,9 +19,14 @@ impl Resolve<GetVariable, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetVariable { name }: GetVariable,
|
||||
_: User,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetVariableResponse> {
|
||||
get_variable(&name).await
|
||||
let mut variable = get_variable(&name).await?;
|
||||
if !variable.is_secret || user.admin {
|
||||
return Ok(variable);
|
||||
}
|
||||
variable.value = "#".repeat(variable.value.len());
|
||||
Ok(variable)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -30,18 +34,27 @@ impl Resolve<ListVariables, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListVariables {}: ListVariables,
|
||||
_: User,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListVariablesResponse> {
|
||||
let variables = find_collect(
|
||||
&db_client().await.variables,
|
||||
&db_client().variables,
|
||||
None,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for variables")?;
|
||||
Ok(ListVariablesResponse {
|
||||
variables,
|
||||
secrets: core_config().secrets.keys().cloned().collect(),
|
||||
})
|
||||
if user.admin {
|
||||
return Ok(variables);
|
||||
}
|
||||
let variables = variables
|
||||
.into_iter()
|
||||
.map(|mut variable| {
|
||||
if variable.is_secret {
|
||||
variable.value = "#".repeat(variable.value.len());
|
||||
}
|
||||
variable
|
||||
})
|
||||
.collect();
|
||||
Ok(variables)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,16 +3,16 @@ use std::{collections::VecDeque, time::Instant};
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Json, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::user::{
|
||||
CreateApiKey, CreateApiKeyResponse, DeleteApiKey,
|
||||
DeleteApiKeyResponse, PushRecentlyViewed,
|
||||
PushRecentlyViewedResponse, SetLastSeenUpdate,
|
||||
SetLastSeenUpdateResponse,
|
||||
},
|
||||
entities::{api_key::ApiKey, monitor_timestamp, user::User},
|
||||
entities::{api_key::ApiKey, komodo_timestamp, user::User},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -20,8 +20,8 @@ use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::{auth_request, random_string},
|
||||
helpers::query::get_user,
|
||||
auth::auth_request,
|
||||
helpers::{query::get_user, random_string},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
@@ -103,7 +103,7 @@ impl Resolve<PushRecentlyViewed, User> for State {
|
||||
}
|
||||
};
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(update),
|
||||
None,
|
||||
@@ -129,10 +129,10 @@ impl Resolve<SetLastSeenUpdate, User> for State {
|
||||
user: User,
|
||||
) -> anyhow::Result<SetLastSeenUpdateResponse> {
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(doc! {
|
||||
"last_update_view": monitor_timestamp()
|
||||
"last_update_view": komodo_timestamp()
|
||||
}),
|
||||
None,
|
||||
)
|
||||
@@ -168,11 +168,10 @@ impl Resolve<CreateApiKey, User> for State {
|
||||
key: key.clone(),
|
||||
secret: secret_hash,
|
||||
user_id: user.id.clone(),
|
||||
created_at: monitor_timestamp(),
|
||||
created_at: komodo_timestamp(),
|
||||
expires,
|
||||
};
|
||||
db_client()
|
||||
.await
|
||||
.api_keys
|
||||
.insert_one(api_key)
|
||||
.await
|
||||
@@ -192,7 +191,7 @@ impl Resolve<DeleteApiKey, User> for State {
|
||||
DeleteApiKey { key }: DeleteApiKey,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteApiKeyResponse> {
|
||||
let client = db_client().await;
|
||||
let client = db_client();
|
||||
let key = client
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key })
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
|
||||
},
|
||||
|
||||
@@ -1,14 +1,17 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use git::GitRes;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
build::{Build, PartialBuildConfig},
|
||||
build::{Build, BuildInfo, PartialBuildConfig},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
user::User,
|
||||
NoData,
|
||||
CloneArgs, NoData,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::mongodb::bson::to_document;
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
@@ -16,8 +19,9 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::git_token,
|
||||
resource,
|
||||
state::{github_client, State},
|
||||
state::{db_client, github_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateBuild, User> for State {
|
||||
@@ -38,13 +42,15 @@ impl Resolve<CopyBuild, User> for State {
|
||||
CopyBuild { name, id }: CopyBuild,
|
||||
user: User,
|
||||
) -> anyhow::Result<Build> {
|
||||
let Build { config, .. } =
|
||||
let Build { mut config, .. } =
|
||||
resource::get_check_permissions::<Build>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
// reset version to 0.0.0
|
||||
config.version = Default::default();
|
||||
resource::create::<Build>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
@@ -71,6 +77,95 @@ impl Resolve<UpdateBuild, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RefreshBuildCache, User> for State {
|
||||
#[instrument(
|
||||
name = "RefreshBuildCache",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshBuildCache { build }: RefreshBuildCache,
|
||||
user: User,
|
||||
) -> anyhow::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// build should be able to do this.
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.repo.is_empty()
|
||||
|| build.config.git_provider.is_empty()
|
||||
{
|
||||
// Nothing to do here
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let config = core_config();
|
||||
|
||||
let mut clone_args: CloneArgs = (&build).into();
|
||||
let repo_path =
|
||||
clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
// Don't want to run these on core.
|
||||
clone_args.on_clone = None;
|
||||
clone_args.on_pull = None;
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
clone_args.https = https
|
||||
})
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let GitRes {
|
||||
hash: latest_hash,
|
||||
message: latest_message,
|
||||
..
|
||||
} = git::pull_or_clone(
|
||||
clone_args,
|
||||
&config.repo_directory,
|
||||
access_token,
|
||||
&[],
|
||||
"",
|
||||
None,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.context("failed to clone build repo")?;
|
||||
|
||||
let info = BuildInfo {
|
||||
last_built_at: build.info.last_built_at,
|
||||
built_hash: build.info.built_hash,
|
||||
built_message: build.info.built_message,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
.context("failed to serialize build info to bson")?;
|
||||
|
||||
db_client()
|
||||
.builds
|
||||
.update_one(
|
||||
doc! { "name": &build.name },
|
||||
doc! { "$set": { "info": info } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update build info on db")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateBuildWebhook, User> for State {
|
||||
#[instrument(name = "CreateBuildWebhook", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -120,12 +215,22 @@ impl Resolve<CreateBuildWebhook, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
github_webhook_secret,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let webhook_secret = if build.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&build.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
@@ -139,7 +244,7 @@ impl Resolve<CreateBuildWebhook, User> for State {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: github_webhook_secret.to_string(),
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
@@ -193,6 +298,12 @@ impl Resolve<DeleteBuildWebhook, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com" {
|
||||
return Err(anyhow!(
|
||||
"Can only manage github.com repo webhooks"
|
||||
));
|
||||
}
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"No repo configured, can't delete webhook"
|
||||
@@ -221,11 +332,15 @@ impl Resolve<DeleteBuildWebhook, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
builder::Builder, permission::PermissionLevel, user::User,
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
deployment::{Deployment, DeploymentState},
|
||||
monitor_timestamp,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
to_monitor_name,
|
||||
to_komodo_name,
|
||||
update::Update,
|
||||
user::User,
|
||||
Operation,
|
||||
@@ -102,7 +102,7 @@ impl Resolve<RenameDeployment, User> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.renaming = true)?;
|
||||
|
||||
let name = to_monitor_name(&name);
|
||||
let name = to_komodo_name(&name);
|
||||
|
||||
let container_state = get_deployment_state(&deployment).await?;
|
||||
|
||||
@@ -116,10 +116,10 @@ impl Resolve<RenameDeployment, User> for State {
|
||||
make_update(&deployment, Operation::RenameDeployment, &user);
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.deployments,
|
||||
&db_client().deployments,
|
||||
&deployment.id,
|
||||
mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": monitor_timestamp() },
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
)
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use anyhow::anyhow;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{UpdateDescription, UpdateDescriptionResponse},
|
||||
entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
sync::ResourceSync, update::ResourceTarget, user::User,
|
||||
server::Server, server_template::ServerTemplate, stack::Stack,
|
||||
sync::ResourceSync, user::User, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
@@ -100,6 +100,14 @@ impl Resolve<UpdateDescription, User> for State {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::update_description::<Stack>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateDescriptionResponse {})
|
||||
}
|
||||
|
||||
@@ -3,7 +3,8 @@ use std::time::Instant;
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::write::*, entities::user::User};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
@@ -19,17 +20,22 @@ mod deployment;
|
||||
mod description;
|
||||
mod permissions;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod service_user;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolver, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug)]
|
||||
#[resolver_target(State)]
|
||||
#[resolver_args(User)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
@@ -49,6 +55,7 @@ pub enum WriteRequest {
|
||||
SetUsersInUserGroup(SetUsersInUserGroup),
|
||||
|
||||
// ==== PERMISSIONS ====
|
||||
UpdateUserAdmin(UpdateUserAdmin),
|
||||
UpdateUserBasePermissions(UpdateUserBasePermissions),
|
||||
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
|
||||
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
|
||||
@@ -62,7 +69,6 @@ pub enum WriteRequest {
|
||||
UpdateServer(UpdateServer),
|
||||
RenameServer(RenameServer),
|
||||
CreateNetwork(CreateNetwork),
|
||||
DeleteNetwork(DeleteNetwork),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
CreateDeployment(CreateDeployment),
|
||||
@@ -76,6 +82,7 @@ pub enum WriteRequest {
|
||||
CopyBuild(CopyBuild),
|
||||
DeleteBuild(DeleteBuild),
|
||||
UpdateBuild(UpdateBuild),
|
||||
RefreshBuildCache(RefreshBuildCache),
|
||||
CreateBuildWebhook(CreateBuildWebhook),
|
||||
DeleteBuildWebhook(DeleteBuildWebhook),
|
||||
|
||||
@@ -96,6 +103,7 @@ pub enum WriteRequest {
|
||||
CopyRepo(CopyRepo),
|
||||
DeleteRepo(DeleteRepo),
|
||||
UpdateRepo(UpdateRepo),
|
||||
RefreshRepoCache(RefreshRepoCache),
|
||||
CreateRepoWebhook(CreateRepoWebhook),
|
||||
DeleteRepoWebhook(DeleteRepoWebhook),
|
||||
|
||||
@@ -117,9 +125,21 @@ pub enum WriteRequest {
|
||||
DeleteResourceSync(DeleteResourceSync),
|
||||
UpdateResourceSync(UpdateResourceSync),
|
||||
RefreshResourceSyncPending(RefreshResourceSyncPending),
|
||||
CommitSync(CommitSync),
|
||||
CreateSyncWebhook(CreateSyncWebhook),
|
||||
DeleteSyncWebhook(DeleteSyncWebhook),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
CreateStackWebhook(CreateStackWebhook),
|
||||
DeleteStackWebhook(DeleteStackWebhook),
|
||||
|
||||
// ==== TAG ====
|
||||
CreateTag(CreateTag),
|
||||
DeleteTag(DeleteTag),
|
||||
@@ -130,7 +150,16 @@ pub enum WriteRequest {
|
||||
CreateVariable(CreateVariable),
|
||||
UpdateVariableValue(UpdateVariableValue),
|
||||
UpdateVariableDescription(UpdateVariableDescription),
|
||||
UpdateVariableIsSecret(UpdateVariableIsSecret),
|
||||
DeleteVariable(DeleteVariable),
|
||||
|
||||
// ==== PROVIDERS ====
|
||||
CreateGitProviderAccount(CreateGitProviderAccount),
|
||||
UpdateGitProviderAccount(UpdateGitProviderAccount),
|
||||
DeleteGitProviderAccount(DeleteGitProviderAccount),
|
||||
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
|
||||
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
|
||||
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -156,7 +185,11 @@ async fn handler(
|
||||
Ok((TypedHeader(ContentType::json()), res??))
|
||||
}
|
||||
|
||||
#[instrument(name = "WriteRequest", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
name = "WriteRequest",
|
||||
skip(user, request),
|
||||
fields(user_id = user.id, request = format!("{:?}", request.extract_variant()))
|
||||
)]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: WriteRequest,
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
UpdatePermissionOnResourceType,
|
||||
UpdatePermissionOnResourceTypeResponse, UpdatePermissionOnTarget,
|
||||
UpdatePermissionOnTargetResponse, UpdateUserBasePermissions,
|
||||
UpdatePermissionOnTargetResponse, UpdateUserAdmin,
|
||||
UpdateUserAdminResponse, UpdateUserBasePermissions,
|
||||
UpdateUserBasePermissionsResponse,
|
||||
},
|
||||
entities::{
|
||||
permission::{UserTarget, UserTargetVariant},
|
||||
update::{ResourceTarget, ResourceTargetVariant},
|
||||
user::User,
|
||||
ResourceTarget, ResourceTargetVariant,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
@@ -28,6 +29,40 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<UpdateUserAdmin, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateUserAdmin { user_id, admin }: UpdateUserAdmin,
|
||||
super_admin: User,
|
||||
) -> anyhow::Result<UpdateUserAdminResponse> {
|
||||
if !super_admin.super_admin {
|
||||
return Err(anyhow!("Only super admins can call this method."));
|
||||
}
|
||||
let user = find_one_by_id(&db_client().users, &user_id)
|
||||
.await
|
||||
.context("failed to query mongo for user")?
|
||||
.context("did not find user with given id")?;
|
||||
|
||||
if !user.enabled {
|
||||
return Err(anyhow!("User is disabled. Enable user first."));
|
||||
}
|
||||
|
||||
if user.super_admin {
|
||||
return Err(anyhow!("Cannot update other super admins"));
|
||||
}
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user_id,
|
||||
doc! { "$set": { "admin": admin } },
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(UpdateUserAdminResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateUserBasePermissions, User> for State {
|
||||
#[instrument(name = "UpdateUserBasePermissions", skip(self, admin))]
|
||||
async fn resolve(
|
||||
@@ -44,13 +79,18 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
|
||||
return Err(anyhow!("this method is admin only"));
|
||||
}
|
||||
|
||||
let user = find_one_by_id(&db_client().await.users, &user_id)
|
||||
let user = find_one_by_id(&db_client().users, &user_id)
|
||||
.await
|
||||
.context("failed to query mongo for user")?
|
||||
.context("did not find user with given id")?;
|
||||
if user.admin {
|
||||
if user.super_admin {
|
||||
return Err(anyhow!(
|
||||
"cannot use this method to update other admins permissions"
|
||||
"Cannot use this method to update super admins permissions"
|
||||
));
|
||||
}
|
||||
if user.admin && !admin.super_admin {
|
||||
return Err(anyhow!(
|
||||
"Only super admins can use this method to update other admins permissions"
|
||||
));
|
||||
}
|
||||
let mut update_doc = Document::new();
|
||||
@@ -65,7 +105,7 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
|
||||
}
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&db_client().users,
|
||||
&user_id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
@@ -119,7 +159,6 @@ impl Resolve<UpdatePermissionOnResourceType, User> for State {
|
||||
match user_target_variant {
|
||||
UserTargetVariant::User => {
|
||||
db_client()
|
||||
.await
|
||||
.users
|
||||
.update_one(filter, update)
|
||||
.await
|
||||
@@ -129,7 +168,6 @@ impl Resolve<UpdatePermissionOnResourceType, User> for State {
|
||||
}
|
||||
UserTargetVariant::UserGroup => {
|
||||
db_client()
|
||||
.await
|
||||
.user_groups
|
||||
.update_one(filter, update)
|
||||
.await
|
||||
@@ -181,7 +219,6 @@ impl Resolve<UpdatePermissionOnTarget, User> for State {
|
||||
(user_target_variant.as_ref(), resource_variant.as_ref());
|
||||
|
||||
db_client()
|
||||
.await
|
||||
.permissions
|
||||
.update_one(
|
||||
doc! {
|
||||
@@ -218,7 +255,6 @@ async fn extract_user_target_with_validation(
|
||||
Err(_) => doc! { "username": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.users
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -233,7 +269,6 @@ async fn extract_user_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.user_groups
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -260,7 +295,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.builds
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -275,7 +309,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.builders
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -290,7 +323,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.deployments
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -305,7 +337,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.servers
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -320,7 +351,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.repos
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -335,7 +365,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.alerters
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -350,7 +379,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.procedures
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -365,7 +393,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.server_templates
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -380,7 +407,6 @@ async fn extract_resource_target_with_validation(
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.resource_syncs
|
||||
.find_one(filter)
|
||||
.await
|
||||
@@ -389,5 +415,19 @@ async fn extract_resource_target_with_validation(
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ResourceSync, id))
|
||||
}
|
||||
ResourceTarget::Stack(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.stacks
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for stacks")?
|
||||
.context("no matching stack found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Stack, id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, procedure::Procedure, user::User,
|
||||
|
||||
399
bin/core/src/api/write/provider.rs
Normal file
399
bin/core/src/api/write/provider.rs
Normal file
@@ -0,0 +1,399 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
provider::{DockerRegistryAccount, GitProviderAccount},
|
||||
user::User,
|
||||
Operation, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::update::{add_update, make_update},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateGitProviderAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateGitProviderAccount { account }: CreateGitProviderAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"only admins can create git provider accounts"
|
||||
));
|
||||
}
|
||||
|
||||
let mut account: GitProviderAccount = account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(anyhow!("domain cannot be empty string."));
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string."));
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::CreateGitProviderAccount,
|
||||
&user,
|
||||
);
|
||||
|
||||
account.id = db_client()
|
||||
.git_accounts
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context("failed to create git provider account on db")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"create git provider account",
|
||||
format!(
|
||||
"Created git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
),
|
||||
);
|
||||
|
||||
update.finalize();
|
||||
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for create git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
Ok(account)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateGitProviderAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateGitProviderAccount { id, mut account }: UpdateGitProviderAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"only admins can update git provider accounts"
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(domain) = &account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"cannot update git provider with empty domain"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(username) = &account.username {
|
||||
if username.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"cannot update git provider with empty username"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure update does not change id
|
||||
account.id = None;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateGitProviderAccount,
|
||||
&user,
|
||||
);
|
||||
|
||||
let account = to_document(&account).context(
|
||||
"failed to serialize partial git provider account to bson",
|
||||
)?;
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
&db.git_accounts,
|
||||
&id,
|
||||
doc! { "$set": account },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update git provider account on db")?;
|
||||
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.git_accounts, &id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id"));
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"update git provider account",
|
||||
format!(
|
||||
"Updated git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
),
|
||||
);
|
||||
|
||||
update.finalize();
|
||||
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for update git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
Ok(account)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteGitProviderAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteGitProviderAccount { id }: DeleteGitProviderAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"only admins can delete git provider accounts"
|
||||
));
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateGitProviderAccount,
|
||||
&user,
|
||||
);
|
||||
|
||||
let db = db_client();
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.git_accounts, &id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id"));
|
||||
};
|
||||
delete_one_by_id(&db.git_accounts, &id, None)
|
||||
.await
|
||||
.context("failed to delete git account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"delete git provider account",
|
||||
format!(
|
||||
"Deleted git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
),
|
||||
);
|
||||
|
||||
update.finalize();
|
||||
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for delete git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
Ok(account)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateDockerRegistryAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateDockerRegistryAccount { account }: CreateDockerRegistryAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"only admins can create docker registry account accounts"
|
||||
));
|
||||
}
|
||||
|
||||
let mut account: DockerRegistryAccount = account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(anyhow!("domain cannot be empty string."));
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string."));
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::CreateDockerRegistryAccount,
|
||||
&user,
|
||||
);
|
||||
|
||||
account.id = db_client()
|
||||
.registry_accounts
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context(
|
||||
"failed to create docker registry account account on db",
|
||||
)?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"create docker registry account",
|
||||
format!(
|
||||
"Created docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
),
|
||||
);
|
||||
|
||||
update.finalize();
|
||||
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for create docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
Ok(account)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateDockerRegistryAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateDockerRegistryAccount { id, mut account }: UpdateDockerRegistryAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"only admins can update docker registry accounts"
|
||||
));
|
||||
}
|
||||
|
||||
if let Some(domain) = &account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"cannot update docker registry account with empty domain"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(username) = &account.username {
|
||||
if username.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"cannot update docker registry account with empty username"
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
account.id = None;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateDockerRegistryAccount,
|
||||
&user,
|
||||
);
|
||||
|
||||
let account = to_document(&account).context(
|
||||
"failed to serialize partial docker registry account account to bson",
|
||||
)?;
|
||||
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
&db.registry_accounts,
|
||||
&id,
|
||||
doc! { "$set": account },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"failed to update docker registry account account on db",
|
||||
)?;
|
||||
|
||||
let Some(account) = find_one_by_id(&db.registry_accounts, &id)
|
||||
.await
|
||||
.context("failed to query db for registry accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id"));
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"update docker registry account",
|
||||
format!(
|
||||
"Updated docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
),
|
||||
);
|
||||
|
||||
update.finalize();
|
||||
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for update docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
Ok(account)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteDockerRegistryAccount, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteDockerRegistryAccount { id }: DeleteDockerRegistryAccount,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"only admins can delete docker registry accounts"
|
||||
));
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateDockerRegistryAccount,
|
||||
&user,
|
||||
);
|
||||
|
||||
let db = db_client();
|
||||
let Some(account) = find_one_by_id(&db.registry_accounts, &id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id"));
|
||||
};
|
||||
delete_one_by_id(&db.registry_accounts, &id, None)
|
||||
.await
|
||||
.context("failed to delete registry account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"delete registry account",
|
||||
format!(
|
||||
"Deleted registry account for {} with username {}",
|
||||
account.domain, account.username
|
||||
),
|
||||
);
|
||||
|
||||
update.finalize();
|
||||
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for delete docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
Ok(account)
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,17 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use git::GitRes;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::{PartialRepoConfig, Repo},
|
||||
repo::{PartialRepoConfig, Repo, RepoInfo},
|
||||
user::User,
|
||||
NoData,
|
||||
CloneArgs, NoData,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::mongodb::bson::to_document;
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
@@ -16,8 +19,9 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::git_token,
|
||||
resource,
|
||||
state::{github_client, State},
|
||||
state::{db_client, github_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateRepo, User> for State {
|
||||
@@ -71,6 +75,92 @@ impl Resolve<UpdateRepo, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RefreshRepoCache, User> for State {
|
||||
#[instrument(
|
||||
name = "RefreshRepoCache",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshRepoCache { repo }: RefreshRepoCache,
|
||||
user: User,
|
||||
) -> anyhow::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// repo should be able to do this.
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider.is_empty()
|
||||
|| repo.config.repo.is_empty()
|
||||
{
|
||||
// Nothing to do
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let mut clone_args: CloneArgs = (&repo).into();
|
||||
let repo_path =
|
||||
clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
// Don't want to run these on core.
|
||||
clone_args.on_clone = None;
|
||||
clone_args.on_pull = None;
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
clone_args.https = https
|
||||
})
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let GitRes { hash, message, .. } = git::pull_or_clone(
|
||||
clone_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
&[],
|
||||
"",
|
||||
None,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to update repo at {repo_path:?}")
|
||||
})?;
|
||||
|
||||
let info = RepoInfo {
|
||||
last_pulled_at: repo.info.last_pulled_at,
|
||||
last_built_at: repo.info.last_built_at,
|
||||
built_hash: repo.info.built_hash,
|
||||
built_message: repo.info.built_message,
|
||||
latest_hash: hash,
|
||||
latest_message: message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
.context("failed to serialize repo info to bson")?;
|
||||
|
||||
db_client()
|
||||
.repos
|
||||
.update_one(
|
||||
doc! { "name": &repo.name },
|
||||
doc! { "$set": { "info": info } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update repo info on db")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateRepoWebhook, User> for State {
|
||||
#[instrument(name = "CreateRepoWebhook", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -120,12 +210,22 @@ impl Resolve<CreateRepoWebhook, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
github_webhook_secret,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let webhook_secret = if repo.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&repo.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
@@ -133,6 +233,9 @@ impl Resolve<CreateRepoWebhook, User> for State {
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
@@ -146,7 +249,7 @@ impl Resolve<CreateRepoWebhook, User> for State {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: github_webhook_secret.to_string(),
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
@@ -200,6 +303,12 @@ impl Resolve<DeleteRepoWebhook, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com" {
|
||||
return Err(anyhow!(
|
||||
"Can only manage github.com repo webhooks"
|
||||
));
|
||||
}
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"No repo configured, can't create webhook"
|
||||
@@ -229,11 +338,15 @@ impl Resolve<DeleteRepoWebhook, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
@@ -241,6 +354,9 @@ impl Resolve<DeleteRepoWebhook, User> for State {
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use formatting::format_serror;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Update, UpdateStatus},
|
||||
@@ -73,7 +73,7 @@ impl Resolve<RenameServer, User> for State {
|
||||
let mut update =
|
||||
make_update(&server, Operation::RenameServer, &user);
|
||||
|
||||
update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": monitor_timestamp() }), None)
|
||||
update_one_by_id(&db_client().servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": komodo_timestamp() }), None)
|
||||
.await
|
||||
.context("failed to update server on db. this name may already be taken.")?;
|
||||
update.push_simple_log(
|
||||
@@ -124,42 +124,3 @@ impl Resolve<CreateNetwork, User> for State {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteNetwork, User> for State {
|
||||
#[instrument(name = "DeleteNetwork", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteNetwork { server, name }: DeleteNetwork,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::DeleteNetwork, &user);
|
||||
update.status = UpdateStatus::InProgress;
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
match periphery
|
||||
.request(api::network::DeleteNetwork { name })
|
||||
.await
|
||||
{
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => update.push_error_log(
|
||||
"delete network",
|
||||
format_serror(&e.context("failed to delete network").into()),
|
||||
),
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
|
||||
UpdateServerTemplate,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::{
|
||||
user::CreateApiKey,
|
||||
write::{
|
||||
@@ -13,7 +13,7 @@ use monitor_client::{
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
komodo_timestamp,
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
@@ -48,15 +48,15 @@ impl Resolve<CreateServiceUser, User> for State {
|
||||
config,
|
||||
enabled: true,
|
||||
admin: false,
|
||||
super_admin: false,
|
||||
create_server_permissions: false,
|
||||
create_build_permissions: false,
|
||||
last_update_view: 0,
|
||||
recents: Default::default(),
|
||||
all: Default::default(),
|
||||
updated_at: monitor_timestamp(),
|
||||
updated_at: komodo_timestamp(),
|
||||
};
|
||||
user.id = db_client()
|
||||
.await
|
||||
.users
|
||||
.insert_one(&user)
|
||||
.await
|
||||
@@ -85,7 +85,7 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
let service_user = db
|
||||
.users
|
||||
.find_one(doc! { "username": &username })
|
||||
@@ -124,11 +124,10 @@ impl Resolve<CreateApiKeyForServiceUser, User> for State {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let service_user = find_one_by_id(&db_client().users, &user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let UserConfig::Service { .. } = &service_user.config else {
|
||||
return Err(anyhow!("user is not service user"));
|
||||
};
|
||||
@@ -148,7 +147,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
let api_key = db
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key })
|
||||
@@ -156,7 +155,7 @@ impl Resolve<DeleteApiKeyForServiceUser, User> for State {
|
||||
.context("failed to query db for api key")?
|
||||
.context("did not find matching api key")?;
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &api_key.user_id)
|
||||
find_one_by_id(&db_client().users, &api_key.user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
|
||||
593
bin/core/src/api/write/stack.rs
Normal file
593
bin/core/src/api/write/stack.rs
Normal file
@@ -0,0 +1,593 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::ServerState,
|
||||
stack::{PartialStackConfig, Stack, StackInfo},
|
||||
update::Update,
|
||||
user::{stack_user, User},
|
||||
FileContents, NoData, Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
|
||||
WriteComposeContentsToHost,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::get_server_with_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
resource,
|
||||
stack::{
|
||||
get_stack_and_server,
|
||||
remote::{get_remote_compose_contents, RemoteComposeContents},
|
||||
services::extract_services_into_res,
|
||||
},
|
||||
state::{db_client, github_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateStack, User> for State {
|
||||
#[instrument(name = "CreateStack", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateStack { name, config }: CreateStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
resource::create::<Stack>(&name, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CopyStack, User> for State {
|
||||
#[instrument(name = "CopyStack", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyStack { name, id }: CopyStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
let Stack { config, .. } =
|
||||
resource::get_check_permissions::<Stack>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Stack>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteStack, User> for State {
|
||||
#[instrument(name = "DeleteStack", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteStack { id }: DeleteStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
resource::delete::<Stack>(&id, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateStack, User> for State {
|
||||
#[instrument(name = "UpdateStack", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateStack { id, config }: UpdateStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Stack> {
|
||||
resource::update::<Stack>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RenameStack, User> for State {
|
||||
#[instrument(name = "RenameStack", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RenameStack { id, name }: RenameStack,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&stack, Operation::RenameStack, &user);
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().stacks,
|
||||
&stack.id,
|
||||
mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update stack name on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"rename stack",
|
||||
format!("renamed stack from {} to {}", stack.name, name),
|
||||
);
|
||||
update.finalize();
|
||||
|
||||
add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteStackFileContents, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
WriteStackFileContents {
|
||||
stack,
|
||||
file_path,
|
||||
contents,
|
||||
}: WriteStackFileContents,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !stack.config.files_on_host {
|
||||
return Err(anyhow!(
|
||||
"Stack is not configured to use files on host, can't write file contents"
|
||||
));
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&stack, Operation::WriteStackContents, &user);
|
||||
|
||||
update.push_simple_log("File contents to write", &contents);
|
||||
|
||||
match periphery_client(&server)?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
file_path,
|
||||
contents,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(log) => {
|
||||
update.logs.push(log);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write file contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = State
|
||||
.resolve(
|
||||
RefreshStackCache { stack: stack.id },
|
||||
stack_user().to_owned(),
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RefreshStackCache, User> for State {
|
||||
#[instrument(
|
||||
name = "RefreshStackCache",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshStackCache { stack }: RefreshStackCache,
|
||||
user: User,
|
||||
) -> anyhow::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// stack should be able to do this.
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let file_contents_empty = stack.config.file_contents.is_empty();
|
||||
|
||||
if !stack.config.files_on_host
|
||||
&& file_contents_empty
|
||||
&& stack.config.repo.is_empty()
|
||||
{
|
||||
// Nothing to do without one of these
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let mut missing_files = Vec::new();
|
||||
|
||||
let (
|
||||
latest_services,
|
||||
remote_contents,
|
||||
remote_errors,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
) = if stack.config.files_on_host {
|
||||
// =============
|
||||
// FILES ON HOST
|
||||
// =============
|
||||
if stack.config.server_id.is_empty() {
|
||||
(vec![], None, None, None, None)
|
||||
} else {
|
||||
let (server, status) =
|
||||
get_server_with_state(&stack.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
(vec![], None, None, None, None)
|
||||
} else {
|
||||
let GetComposeContentsOnHostResponse { contents, errors } =
|
||||
match periphery_client(&server)?
|
||||
.request(GetComposeContentsOnHost {
|
||||
file_paths: stack.file_paths().to_vec(),
|
||||
name: stack.name.clone(),
|
||||
run_directory: stack.config.run_directory.clone(),
|
||||
})
|
||||
.await
|
||||
.context(
|
||||
"failed to get compose file contents from host",
|
||||
) {
|
||||
Ok(res) => res,
|
||||
Err(e) => GetComposeContentsOnHostResponse {
|
||||
contents: Default::default(),
|
||||
errors: vec![FileContents {
|
||||
path: stack.config.run_directory.clone(),
|
||||
contents: format_serror(&e.into()),
|
||||
}],
|
||||
},
|
||||
};
|
||||
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &contents {
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
&mut services,
|
||||
) {
|
||||
warn!(
|
||||
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
|
||||
stack.name
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
(services, Some(contents), Some(errors), None, None)
|
||||
}
|
||||
}
|
||||
} else if file_contents_empty {
|
||||
// ================
|
||||
// REPO BASED STACK
|
||||
// ================
|
||||
let RemoteComposeContents {
|
||||
successful: remote_contents,
|
||||
errored: remote_errors,
|
||||
hash: latest_hash,
|
||||
message: latest_message,
|
||||
..
|
||||
} =
|
||||
get_remote_compose_contents(&stack, Some(&mut missing_files))
|
||||
.await?;
|
||||
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &remote_contents {
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
&mut services,
|
||||
) {
|
||||
warn!(
|
||||
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
|
||||
stack.name
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
(
|
||||
services,
|
||||
Some(remote_contents),
|
||||
Some(remote_errors),
|
||||
latest_hash,
|
||||
latest_message,
|
||||
)
|
||||
} else {
|
||||
// =============
|
||||
// UI BASED FILE
|
||||
// =============
|
||||
let mut services = Vec::new();
|
||||
if let Err(e) = extract_services_into_res(
|
||||
// this should latest (not deployed), so make the project name fresh.
|
||||
&stack.project_name(true),
|
||||
&stack.config.file_contents,
|
||||
&mut services,
|
||||
) {
|
||||
warn!(
|
||||
"failed to extract stack services, things won't works correctly. stack: {} | {e:#}",
|
||||
stack.name
|
||||
);
|
||||
services.extend(stack.info.latest_services);
|
||||
};
|
||||
(services, None, None, None, None)
|
||||
};
|
||||
|
||||
let info = StackInfo {
|
||||
missing_files,
|
||||
deployed_services: stack.info.deployed_services,
|
||||
deployed_project_name: stack.info.deployed_project_name,
|
||||
deployed_contents: stack.info.deployed_contents,
|
||||
deployed_hash: stack.info.deployed_hash,
|
||||
deployed_message: stack.info.deployed_message,
|
||||
latest_services,
|
||||
remote_contents,
|
||||
remote_errors,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
.context("failed to serialize stack info to bson")?;
|
||||
|
||||
db_client()
|
||||
.stacks
|
||||
.update_one(
|
||||
doc! { "name": &stack.name },
|
||||
doc! { "$set": { "info": info } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update stack info on db")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateStackWebhook, User> for State {
|
||||
#[instrument(name = "CreateStackWebhook", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateStackWebhook { stack, action }: CreateStackWebhook,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateStackWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
));
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.repo.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"No repo configured, can't create webhook"
|
||||
));
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Stack repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(anyhow!(
|
||||
"Cannot manage repo webhooks under owner {owner}"
|
||||
));
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Stack repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if stack.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&stack.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match action {
|
||||
StackWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id)
|
||||
}
|
||||
StackWebhookAction::Deploy => {
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !stack.config.webhook_enabled {
|
||||
self
|
||||
.resolve(
|
||||
UpdateStack {
|
||||
id: stack.id,
|
||||
config: PartialStackConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
},
|
||||
user,
|
||||
)
|
||||
.await
|
||||
.context("failed to update stack to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteStackWebhook, User> for State {
|
||||
#[instrument(name = "DeleteStackWebhook", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteStackWebhook { stack, action }: DeleteStackWebhook,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteStackWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
));
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com" {
|
||||
return Err(anyhow!(
|
||||
"Can only manage github.com repo webhooks"
|
||||
));
|
||||
}
|
||||
|
||||
if stack.config.repo.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"No repo configured, can't create webhook"
|
||||
));
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Stack repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(anyhow!(
|
||||
"Cannot manage repo webhooks under owner {owner}"
|
||||
));
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Sync repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match action {
|
||||
StackWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id)
|
||||
}
|
||||
StackWebhookAction::Deploy => {
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
@@ -1,28 +1,30 @@
|
||||
use std::{collections::HashMap, path::PathBuf};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use formatting::format_serror;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
use komodo_client::{
|
||||
api::{read::ExportAllResourcesToToml, write::*},
|
||||
entities::{
|
||||
self,
|
||||
alert::{Alert, AlertData},
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
config::core::CoreConfig,
|
||||
monitor_timestamp,
|
||||
deployment::Deployment,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::{stats::SeverityLevel, Server},
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::{
|
||||
PartialResourceSyncConfig, PendingSyncUpdates,
|
||||
PendingSyncUpdatesData, PendingSyncUpdatesDataErr,
|
||||
PendingSyncUpdatesDataOk, ResourceSync,
|
||||
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
|
||||
},
|
||||
update::ResourceTarget,
|
||||
user::User,
|
||||
NoData,
|
||||
update::Log,
|
||||
user::{sync_user, User},
|
||||
NoData, Operation, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
@@ -35,17 +37,18 @@ use octorust::types::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
config::core_config,
|
||||
helpers::{
|
||||
alert::send_alerts,
|
||||
query::get_id_to_tags,
|
||||
sync::{
|
||||
deployment,
|
||||
resource::{get_updates_for_view, AllResourcesById},
|
||||
},
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
resource,
|
||||
resource::{self, refresh_resource_sync_state_cache},
|
||||
state::{db_client, github_client, State},
|
||||
sync::{
|
||||
deploy::SyncDeployParams, remote::RemoteResources,
|
||||
view::push_updates_for_view, AllResourcesById,
|
||||
},
|
||||
};
|
||||
|
||||
impl Resolve<CreateResourceSync, User> for State {
|
||||
@@ -101,6 +104,11 @@ impl Resolve<UpdateResourceSync, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
#[instrument(
|
||||
name = "RefreshResourceSyncPending",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
|
||||
@@ -108,154 +116,238 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
) -> anyhow::Result<ResourceSync> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// sync should be able to do this.
|
||||
let sync = resource::get_check_permissions::<
|
||||
let mut sync = resource::get_check_permissions::<
|
||||
entities::sync::ResourceSync,
|
||||
>(&sync, &user, PermissionLevel::Execute)
|
||||
.await?;
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(anyhow!("resource sync repo not configured"));
|
||||
if !sync.config.managed
|
||||
&& !sync.config.files_on_host
|
||||
&& sync.config.file_contents.is_empty()
|
||||
&& sync.config.repo.is_empty()
|
||||
{
|
||||
// Sync not configured, nothing to refresh
|
||||
return Ok(sync);
|
||||
}
|
||||
|
||||
let res = async {
|
||||
let (res, _, hash, message) =
|
||||
crate::helpers::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
let resources = res?;
|
||||
let RemoteResources {
|
||||
resources,
|
||||
files,
|
||||
file_errors,
|
||||
hash,
|
||||
message,
|
||||
..
|
||||
} = crate::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
|
||||
sync.info.remote_contents = files;
|
||||
sync.info.remote_errors = file_errors;
|
||||
sync.info.pending_hash = hash;
|
||||
sync.info.pending_message = message;
|
||||
|
||||
if !sync.info.remote_errors.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Remote resources have errors. Cannot compute diffs."
|
||||
));
|
||||
}
|
||||
|
||||
let resources = resources?;
|
||||
|
||||
let all_resources = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
let all_resources = AllResourcesById::load().await?;
|
||||
|
||||
let data = PendingSyncUpdatesDataOk {
|
||||
server_updates: get_updates_for_view::<Server>(
|
||||
let deployments_by_name = all_resources
|
||||
.deployments
|
||||
.values()
|
||||
.map(|deployment| {
|
||||
(deployment.name.clone(), deployment.clone())
|
||||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
let stacks_by_name = all_resources
|
||||
.stacks
|
||||
.values()
|
||||
.map(|stack| (stack.name.clone(), stack.clone()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let deploy_updates =
|
||||
crate::sync::deploy::get_updates_for_view(SyncDeployParams {
|
||||
deployments: &resources.deployments,
|
||||
deployment_map: &deployments_by_name,
|
||||
stacks: &resources.stacks,
|
||||
stack_map: &stacks_by_name,
|
||||
all_resources: &all_resources,
|
||||
})
|
||||
.await;
|
||||
|
||||
let delete = sync.config.managed || sync.config.delete;
|
||||
|
||||
let mut diffs = Vec::new();
|
||||
|
||||
{
|
||||
push_updates_for_view::<Server>(
|
||||
resources.servers,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get server updates")?,
|
||||
deployment_updates: deployment::get_updates_for_view(
|
||||
.await?;
|
||||
push_updates_for_view::<Stack>(
|
||||
resources.stacks,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await?;
|
||||
push_updates_for_view::<Deployment>(
|
||||
resources.deployments,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get deployment updates")?,
|
||||
build_updates: get_updates_for_view::<Build>(
|
||||
.await?;
|
||||
push_updates_for_view::<Build>(
|
||||
resources.builds,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get build updates")?,
|
||||
repo_updates: get_updates_for_view::<Repo>(
|
||||
.await?;
|
||||
push_updates_for_view::<Repo>(
|
||||
resources.repos,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get repo updates")?,
|
||||
procedure_updates: get_updates_for_view::<Procedure>(
|
||||
.await?;
|
||||
push_updates_for_view::<Procedure>(
|
||||
resources.procedures,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get procedure updates")?,
|
||||
alerter_updates: get_updates_for_view::<Alerter>(
|
||||
resources.alerters,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get alerter updates")?,
|
||||
builder_updates: get_updates_for_view::<Builder>(
|
||||
.await?;
|
||||
push_updates_for_view::<Builder>(
|
||||
resources.builders,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get builder updates")?,
|
||||
server_template_updates:
|
||||
get_updates_for_view::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get server template updates")?,
|
||||
resource_sync_updates: get_updates_for_view::<
|
||||
entities::sync::ResourceSync,
|
||||
>(
|
||||
.await?;
|
||||
push_updates_for_view::<Alerter>(
|
||||
resources.alerters,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await?;
|
||||
push_updates_for_view::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await?;
|
||||
push_updates_for_view::<ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
sync.config.delete,
|
||||
delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resource sync updates")?,
|
||||
variable_updates:
|
||||
crate::helpers::sync::variables::get_updates_for_view(
|
||||
resources.variables,
|
||||
sync.config.delete,
|
||||
)
|
||||
.await
|
||||
.context("failed to get variable updates")?,
|
||||
user_group_updates:
|
||||
crate::helpers::sync::user_groups::get_updates_for_view(
|
||||
resources.user_groups,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await
|
||||
.context("failed to get user group updates")?,
|
||||
};
|
||||
anyhow::Ok((hash, message, data))
|
||||
.await?;
|
||||
}
|
||||
|
||||
let variable_updates =
|
||||
crate::sync::variables::get_updates_for_view(
|
||||
&resources.variables,
|
||||
// Delete doesn't work with variables when match tags are set
|
||||
sync.config.match_tags.is_empty() && delete,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let user_group_updates =
|
||||
crate::sync::user_groups::get_updates_for_view(
|
||||
resources.user_groups,
|
||||
// Delete doesn't work with user groups when match tags are set
|
||||
sync.config.match_tags.is_empty() && delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await?;
|
||||
|
||||
anyhow::Ok((
|
||||
diffs,
|
||||
deploy_updates,
|
||||
variable_updates,
|
||||
user_group_updates,
|
||||
))
|
||||
}
|
||||
.await;
|
||||
|
||||
let (pending, has_updates) = match res {
|
||||
Ok((hash, message, data)) => {
|
||||
let has_updates = !data.no_updates();
|
||||
(
|
||||
PendingSyncUpdates {
|
||||
hash: Some(hash),
|
||||
message: Some(message),
|
||||
data: PendingSyncUpdatesData::Ok(data),
|
||||
},
|
||||
has_updates,
|
||||
)
|
||||
}
|
||||
let (
|
||||
resource_updates,
|
||||
deploy_updates,
|
||||
variable_updates,
|
||||
user_group_updates,
|
||||
pending_error,
|
||||
) = match res {
|
||||
Ok(res) => (res.0, res.1, res.2, res.3, None),
|
||||
Err(e) => (
|
||||
PendingSyncUpdates {
|
||||
hash: None,
|
||||
message: None,
|
||||
data: PendingSyncUpdatesData::Err(
|
||||
PendingSyncUpdatesDataErr {
|
||||
message: format_serror(&e.into()),
|
||||
},
|
||||
),
|
||||
},
|
||||
false,
|
||||
Default::default(),
|
||||
Default::default(),
|
||||
Default::default(),
|
||||
Default::default(),
|
||||
Some(format_serror(&e.into())),
|
||||
),
|
||||
};
|
||||
|
||||
let pending = to_document(&pending)
|
||||
let has_updates = !resource_updates.is_empty()
|
||||
|| !deploy_updates.to_deploy == 0
|
||||
|| !variable_updates.is_empty()
|
||||
|| !user_group_updates.is_empty();
|
||||
|
||||
let info = ResourceSyncInfo {
|
||||
last_sync_ts: sync.info.last_sync_ts,
|
||||
last_sync_hash: sync.info.last_sync_hash,
|
||||
last_sync_message: sync.info.last_sync_message,
|
||||
remote_contents: sync.info.remote_contents,
|
||||
remote_errors: sync.info.remote_errors,
|
||||
pending_hash: sync.info.pending_hash,
|
||||
pending_message: sync.info.pending_message,
|
||||
pending_deploy: deploy_updates,
|
||||
resource_updates,
|
||||
variable_updates,
|
||||
user_group_updates,
|
||||
pending_error,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
.context("failed to serialize pending to document")?;
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.resource_syncs,
|
||||
&db_client().resource_syncs,
|
||||
&sync.id,
|
||||
doc! { "$set": { "info.pending": pending } },
|
||||
doc! { "$set": { "info": info } },
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
@@ -264,9 +356,8 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
let id = sync.id.clone();
|
||||
let name = sync.name.clone();
|
||||
tokio::task::spawn(async move {
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
let Some(existing) = db_client()
|
||||
.await
|
||||
.alerts
|
||||
.find_one(doc! {
|
||||
"resolved": false,
|
||||
@@ -285,7 +376,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
(None, true) => {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts: monitor_timestamp(),
|
||||
ts: komodo_timestamp(),
|
||||
resolved: false,
|
||||
level: SeverityLevel::Ok,
|
||||
target: ResourceTarget::ResourceSync(id.clone()),
|
||||
@@ -308,7 +399,7 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
doc! {
|
||||
"$set": {
|
||||
"resolved": true,
|
||||
"resolved_ts": monitor_timestamp()
|
||||
"resolved_ts": komodo_timestamp()
|
||||
}
|
||||
},
|
||||
None,
|
||||
@@ -327,6 +418,135 @@ impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CommitSync, User> for State {
|
||||
#[instrument(name = "CommitSync", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CommitSync { sync }: CommitSync,
|
||||
user: User,
|
||||
) -> anyhow::Result<ResourceSync> {
|
||||
let sync = resource::get_check_permissions::<
|
||||
entities::sync::ResourceSync,
|
||||
>(&sync, &user, PermissionLevel::Write)
|
||||
.await?;
|
||||
|
||||
let fresh_sync = !sync.config.files_on_host
|
||||
&& sync.config.file_contents.is_empty()
|
||||
&& sync.config.repo.is_empty();
|
||||
|
||||
if !sync.config.managed && !fresh_sync {
|
||||
return Err(anyhow!(
|
||||
"Cannot commit to sync. Enabled 'managed' mode."
|
||||
));
|
||||
}
|
||||
|
||||
let res = State
|
||||
.resolve(
|
||||
ExportAllResourcesToToml {
|
||||
tags: sync.config.match_tags,
|
||||
},
|
||||
sync_user().to_owned(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::ResourceSync(sync.id),
|
||||
Operation::CommitSync,
|
||||
&user,
|
||||
);
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
if sync.config.files_on_host {
|
||||
let path = sync
|
||||
.config
|
||||
.resource_path
|
||||
.parse::<PathBuf>()
|
||||
.context("Resource path is not valid file path")?;
|
||||
let extension = path
|
||||
.extension()
|
||||
.context("Resource path missing '.toml' extension")?;
|
||||
if extension != "toml" {
|
||||
return Err(anyhow!("Wrong file extension. Expected '.toml', got '.{extension:?}'"));
|
||||
}
|
||||
if let Some(parent) = path.parent() {
|
||||
let _ = tokio::fs::create_dir_all(&parent).await;
|
||||
};
|
||||
if let Err(e) =
|
||||
tokio::fs::write(&sync.config.resource_path, &res.toml)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to write resource file to {}",
|
||||
sync.config.resource_path
|
||||
)
|
||||
})
|
||||
{
|
||||
update.push_error_log(
|
||||
"Write resource file",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
update.finalize();
|
||||
add_update(update).await?;
|
||||
return resource::get::<ResourceSync>(&sync.name).await;
|
||||
}
|
||||
} else if let Err(e) = db_client()
|
||||
.resource_syncs
|
||||
.update_one(
|
||||
doc! { "name": &sync.name },
|
||||
doc! { "$set": { "config.file_contents": &res.toml } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update file_contents on db")
|
||||
{
|
||||
update.push_error_log(
|
||||
"Write resource to database",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
update.finalize();
|
||||
add_update(update).await?;
|
||||
return resource::get::<ResourceSync>(&sync.name).await;
|
||||
}
|
||||
|
||||
update
|
||||
.logs
|
||||
.push(Log::simple("Committed resources", res.toml));
|
||||
|
||||
let res = match State
|
||||
.resolve(RefreshResourceSyncPending { sync: sync.name }, user)
|
||||
.await
|
||||
{
|
||||
Ok(sync) => Ok(sync),
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Refresh sync pending",
|
||||
format_serror(&(&e).into()),
|
||||
);
|
||||
Err(e)
|
||||
}
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_resource_sync_state_cache().await;
|
||||
}
|
||||
update_update(update).await?;
|
||||
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateSyncWebhook, User> for State {
|
||||
#[instrument(name = "CreateSyncWebhook", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -376,12 +596,22 @@ impl Resolve<CreateSyncWebhook, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
github_webhook_secret,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let webhook_secret = if sync.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&sync.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match action {
|
||||
SyncWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id)
|
||||
@@ -402,7 +632,7 @@ impl Resolve<CreateSyncWebhook, User> for State {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: github_webhook_secret.to_string(),
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
@@ -456,6 +686,12 @@ impl Resolve<DeleteSyncWebhook, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.git_provider != "github.com" {
|
||||
return Err(anyhow!(
|
||||
"Can only manage github.com repo webhooks"
|
||||
));
|
||||
}
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"No repo configured, can't create webhook"
|
||||
@@ -485,11 +721,15 @@ impl Resolve<DeleteSyncWebhook, User> for State {
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
github_webhook_base_url,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = github_webhook_base_url.as_ref().unwrap_or(host);
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match action {
|
||||
SyncWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
|
||||
UpdateTagsOnResourceResponse,
|
||||
@@ -10,8 +10,8 @@ use monitor_client::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
procedure::Procedure, repo::Repo, server::Server,
|
||||
server_template::ServerTemplate, sync::ResourceSync, tag::Tag,
|
||||
update::ResourceTarget, user::User,
|
||||
server_template::ServerTemplate, stack::Stack,
|
||||
sync::ResourceSync, tag::Tag, user::User, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
@@ -44,7 +44,6 @@ impl Resolve<CreateTag, User> for State {
|
||||
};
|
||||
|
||||
tag.id = db_client()
|
||||
.await
|
||||
.tags
|
||||
.insert_one(&tag)
|
||||
.await
|
||||
@@ -59,6 +58,7 @@ impl Resolve<CreateTag, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<RenameTag, User> for State {
|
||||
#[instrument(name = "RenameTag", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RenameTag { id, name }: RenameTag,
|
||||
@@ -71,7 +71,7 @@ impl Resolve<RenameTag, User> for State {
|
||||
get_tag_check_owner(&id, &user).await?;
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.tags,
|
||||
&db_client().tags,
|
||||
&id,
|
||||
doc! { "$set": { "name": name } },
|
||||
None,
|
||||
@@ -95,6 +95,7 @@ impl Resolve<DeleteTag, User> for State {
|
||||
tokio::try_join!(
|
||||
resource::remove_tag_from_all::<Server>(&id),
|
||||
resource::remove_tag_from_all::<Deployment>(&id),
|
||||
resource::remove_tag_from_all::<Stack>(&id),
|
||||
resource::remove_tag_from_all::<Build>(&id),
|
||||
resource::remove_tag_from_all::<Repo>(&id),
|
||||
resource::remove_tag_from_all::<Builder>(&id),
|
||||
@@ -103,7 +104,7 @@ impl Resolve<DeleteTag, User> for State {
|
||||
resource::remove_tag_from_all::<ServerTemplate>(&id),
|
||||
)?;
|
||||
|
||||
delete_one_by_id(&db_client().await.tags, &id, None).await?;
|
||||
delete_one_by_id(&db_client().tags, &id, None).await?;
|
||||
|
||||
Ok(tag)
|
||||
}
|
||||
@@ -200,6 +201,15 @@ impl Resolve<UpdateTagsOnResource, User> for State {
|
||||
.await?;
|
||||
resource::update_tags::<ResourceSync>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::get_check_permissions::<Stack>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Stack>(&id, tags, user).await?
|
||||
}
|
||||
};
|
||||
Ok(UpdateTagsOnResourceResponse {})
|
||||
}
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
|
||||
RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup,
|
||||
},
|
||||
entities::{monitor_timestamp, user::User, user_group::UserGroup},
|
||||
entities::{komodo_timestamp, user::User, user_group::UserGroup},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
@@ -30,10 +30,10 @@ impl Resolve<CreateUserGroup, User> for State {
|
||||
id: Default::default(),
|
||||
users: Default::default(),
|
||||
all: Default::default(),
|
||||
updated_at: monitor_timestamp(),
|
||||
updated_at: komodo_timestamp(),
|
||||
name,
|
||||
};
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
let id = db
|
||||
.user_groups
|
||||
.insert_one(user_group)
|
||||
@@ -59,7 +59,7 @@ impl Resolve<RenameUserGroup, User> for State {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
&db.user_groups,
|
||||
&id,
|
||||
@@ -85,7 +85,7 @@ impl Resolve<DeleteUserGroup, User> for State {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
let ug = find_one_by_id(&db.user_groups, &id)
|
||||
.await
|
||||
@@ -118,7 +118,7 @@ impl Resolve<AddUserToUserGroup, User> for State {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
let filter = match ObjectId::from_str(&user) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
@@ -163,7 +163,7 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
let filter = match ObjectId::from_str(&user) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
@@ -205,7 +205,7 @@ impl Resolve<SetUsersInUserGroup, User> for State {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
|
||||
let db = db_client().await;
|
||||
let db = db_client();
|
||||
|
||||
let all_users = find_collect(&db.users, None, None)
|
||||
.await
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CreateVariable, CreateVariableResponse, DeleteVariable,
|
||||
DeleteVariableResponse, UpdateVariableDescription,
|
||||
UpdateVariableDescriptionResponse, UpdateVariableValue,
|
||||
UpdateVariableDescriptionResponse, UpdateVariableIsSecret,
|
||||
UpdateVariableIsSecretResponse, UpdateVariableValue,
|
||||
UpdateVariableValueResponse,
|
||||
},
|
||||
entities::{
|
||||
update::ResourceTarget, user::User, variable::Variable, Operation,
|
||||
user::User, variable::Variable, Operation, ResourceTarget,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
@@ -22,12 +23,14 @@ use crate::{
|
||||
};
|
||||
|
||||
impl Resolve<CreateVariable, User> for State {
|
||||
#[instrument(name = "CreateVariable", skip(self, user, value))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateVariable {
|
||||
name,
|
||||
value,
|
||||
description,
|
||||
is_secret,
|
||||
}: CreateVariable,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateVariableResponse> {
|
||||
@@ -39,10 +42,10 @@ impl Resolve<CreateVariable, User> for State {
|
||||
name,
|
||||
value,
|
||||
description,
|
||||
is_secret,
|
||||
};
|
||||
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.insert_one(&variable)
|
||||
.await
|
||||
@@ -65,13 +68,14 @@ impl Resolve<CreateVariable, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<UpdateVariableValue, User> for State {
|
||||
#[instrument(name = "UpdateVariableValue", skip(self, user, value))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableValue { name, value }: UpdateVariableValue,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableValueResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
return Err(anyhow!("only admins can update variables"));
|
||||
}
|
||||
|
||||
let variable = get_variable(&name).await?;
|
||||
@@ -81,7 +85,6 @@ impl Resolve<UpdateVariableValue, User> for State {
|
||||
}
|
||||
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
@@ -96,13 +99,19 @@ impl Resolve<UpdateVariableValue, User> for State {
|
||||
&user,
|
||||
);
|
||||
|
||||
update.push_simple_log(
|
||||
"update variable value",
|
||||
let log = if variable.is_secret {
|
||||
format!(
|
||||
"<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
|
||||
variable.value.replace(|_| true, "#")
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
|
||||
variable.value
|
||||
),
|
||||
);
|
||||
)
|
||||
};
|
||||
|
||||
update.push_simple_log("update variable value", log);
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
@@ -112,16 +121,16 @@ impl Resolve<UpdateVariableValue, User> for State {
|
||||
}
|
||||
|
||||
impl Resolve<UpdateVariableDescription, User> for State {
|
||||
#[instrument(name = "UpdateVariableDescription", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableDescription { name, description }: UpdateVariableDescription,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableDescriptionResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
return Err(anyhow!("only admins can update variables"));
|
||||
}
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
@@ -133,6 +142,28 @@ impl Resolve<UpdateVariableDescription, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateVariableIsSecret, User> for State {
|
||||
#[instrument(name = "UpdateVariableIsSecret", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableIsSecret { name, is_secret }: UpdateVariableIsSecret,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableIsSecretResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables"));
|
||||
}
|
||||
db_client()
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
doc! { "$set": { "is_secret": is_secret } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable is secret on db")?;
|
||||
get_variable(&name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteVariable, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -140,11 +171,10 @@ impl Resolve<DeleteVariable, User> for State {
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
return Err(anyhow!("only admins can delete variables"));
|
||||
}
|
||||
let variable = get_variable(&name).await?;
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.delete_one(doc! { "name": &name })
|
||||
.await
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::entities::config::core::{
|
||||
use komodo_client::entities::config::core::{
|
||||
CoreConfig, OauthCredentials,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
@@ -9,8 +9,8 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
auth::{random_string, STATE_PREFIX_LENGTH},
|
||||
config::core_config,
|
||||
auth::STATE_PREFIX_LENGTH, config::core_config,
|
||||
helpers::random_string,
|
||||
};
|
||||
|
||||
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {
|
||||
|
||||
@@ -2,11 +2,11 @@ use anyhow::{anyhow, Context};
|
||||
use axum::{
|
||||
extract::Query, response::Redirect, routing::get, Router,
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::entities::{
|
||||
monitor_timestamp,
|
||||
use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
@@ -64,25 +64,30 @@ async fn callback(
|
||||
let github_user =
|
||||
client.get_github_user(&token.access_token).await?;
|
||||
let github_id = github_user.id.to_string();
|
||||
let db_client = db_client().await;
|
||||
let db_client = db_client();
|
||||
let user = db_client
|
||||
.users
|
||||
.find_one(doc! { "config.data.github_id": &github_id })
|
||||
.await
|
||||
.context("failed at find user query from mongo")?;
|
||||
.context("failed at find user query from database")?;
|
||||
let jwt = match user {
|
||||
Some(user) => jwt_client()
|
||||
.generate(user.id)
|
||||
.context("failed to generate jwt")?,
|
||||
None => {
|
||||
let ts = monitor_timestamp();
|
||||
let ts = komodo_timestamp();
|
||||
let no_users_exist =
|
||||
db_client.users.find_one(Document::new()).await?.is_none();
|
||||
let core_config = core_config();
|
||||
if !no_users_exist && core_config.disable_user_registration {
|
||||
return Err(anyhow!("User registration is disabled"));
|
||||
}
|
||||
let user = User {
|
||||
id: Default::default(),
|
||||
username: github_user.login,
|
||||
enabled: no_users_exist,
|
||||
enabled: no_users_exist || core_config.enable_new_users,
|
||||
admin: no_users_exist,
|
||||
super_admin: no_users_exist,
|
||||
create_server_permissions: no_users_exist,
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
|
||||
@@ -2,7 +2,7 @@ use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use jwt::Token;
|
||||
use monitor_client::entities::config::core::{
|
||||
use komodo_client::entities::config::core::{
|
||||
CoreConfig, OauthCredentials,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
@@ -11,8 +11,8 @@ use serde_json::Value;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
auth::{random_string, STATE_PREFIX_LENGTH},
|
||||
config::core_config,
|
||||
auth::STATE_PREFIX_LENGTH, config::core_config,
|
||||
helpers::random_string,
|
||||
};
|
||||
|
||||
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {
|
||||
@@ -73,7 +73,7 @@ impl GoogleOauthClient {
|
||||
client_id: id.clone(),
|
||||
client_secret: secret.clone(),
|
||||
redirect_uri: format!("{host}/auth/google/callback"),
|
||||
user_agent: String::from("monitor"),
|
||||
user_agent: String::from("komodo"),
|
||||
states: Default::default(),
|
||||
scopes,
|
||||
}
|
||||
|
||||
@@ -3,8 +3,8 @@ use async_timing_util::unix_timestamp_ms;
|
||||
use axum::{
|
||||
extract::Query, response::Redirect, routing::get, Router,
|
||||
};
|
||||
use komodo_client::entities::user::{User, UserConfig};
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::entities::user::{User, UserConfig};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
@@ -73,7 +73,7 @@ async fn callback(
|
||||
.await?;
|
||||
let google_user = client.get_google_user(&token.id_token)?;
|
||||
let google_id = google_user.id.to_string();
|
||||
let db_client = db_client().await;
|
||||
let db_client = db_client();
|
||||
let user = db_client
|
||||
.users
|
||||
.find_one(doc! { "config.data.google_id": &google_id })
|
||||
@@ -87,6 +87,10 @@ async fn callback(
|
||||
let ts = unix_timestamp_ms() as i64;
|
||||
let no_users_exist =
|
||||
db_client.users.find_one(Document::new()).await?.is_none();
|
||||
let core_config = core_config();
|
||||
if !no_users_exist && core_config.disable_user_registration {
|
||||
return Err(anyhow!("User registration is disabled"));
|
||||
}
|
||||
let user = User {
|
||||
id: Default::default(),
|
||||
username: google_user
|
||||
@@ -96,8 +100,9 @@ async fn callback(
|
||||
.first()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
enabled: no_users_exist,
|
||||
enabled: no_users_exist || core_config.enable_new_users,
|
||||
admin: no_users_exist,
|
||||
super_admin: no_users_exist,
|
||||
create_server_permissions: no_users_exist,
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
|
||||
@@ -6,13 +6,13 @@ use async_timing_util::{
|
||||
};
|
||||
use hmac::{Hmac, Mac};
|
||||
use jwt::SignWithKey;
|
||||
use monitor_client::entities::config::core::CoreConfig;
|
||||
use komodo_client::entities::config::core::CoreConfig;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::Sha256;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use super::random_string;
|
||||
use crate::helpers::random_string;
|
||||
|
||||
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
|
||||
|
||||
@@ -25,26 +25,31 @@ pub struct JwtClaims {
|
||||
|
||||
pub struct JwtClient {
|
||||
pub key: Hmac<Sha256>,
|
||||
valid_for_ms: u128,
|
||||
ttl_ms: u128,
|
||||
exchange_tokens: ExchangeTokenMap,
|
||||
}
|
||||
|
||||
impl JwtClient {
|
||||
pub fn new(config: &CoreConfig) -> JwtClient {
|
||||
let key = Hmac::new_from_slice(random_string(40).as_bytes())
|
||||
.expect("failed at taking HmacSha256 of jwt secret");
|
||||
JwtClient {
|
||||
pub fn new(config: &CoreConfig) -> anyhow::Result<JwtClient> {
|
||||
let secret = if config.jwt_secret.is_empty() {
|
||||
random_string(40)
|
||||
} else {
|
||||
config.jwt_secret.clone()
|
||||
};
|
||||
let key = Hmac::new_from_slice(secret.as_bytes())
|
||||
.context("failed at taking HmacSha256 of jwt secret")?;
|
||||
Ok(JwtClient {
|
||||
key,
|
||||
valid_for_ms: get_timelength_in_ms(
|
||||
config.jwt_valid_for.to_string().parse().unwrap(),
|
||||
ttl_ms: get_timelength_in_ms(
|
||||
config.jwt_ttl.to_string().parse()?,
|
||||
),
|
||||
exchange_tokens: Default::default(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
|
||||
let iat = unix_timestamp_ms();
|
||||
let exp = iat + self.valid_for_ms;
|
||||
let exp = iat + self.ttl_ms;
|
||||
let claims = JwtClaims {
|
||||
id: user_id,
|
||||
iat,
|
||||
|
||||
@@ -3,14 +3,14 @@ use std::str::FromStr;
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use axum::http::HeaderMap;
|
||||
use mongo_indexed::Document;
|
||||
use monitor_client::{
|
||||
use komodo_client::{
|
||||
api::auth::{
|
||||
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
|
||||
LoginLocalUserResponse,
|
||||
},
|
||||
entities::user::{User, UserConfig},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -29,35 +29,42 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
|
||||
CreateLocalUser { username, password }: CreateLocalUser,
|
||||
_: HeaderMap,
|
||||
) -> anyhow::Result<CreateLocalUserResponse> {
|
||||
if !core_config().local_auth {
|
||||
return Err(anyhow!("local auth is not enabled"));
|
||||
let core_config = core_config();
|
||||
|
||||
if !core_config.local_auth {
|
||||
return Err(anyhow!("Local auth is not enabled"));
|
||||
}
|
||||
|
||||
if username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string"));
|
||||
return Err(anyhow!("Username cannot be empty string"));
|
||||
}
|
||||
|
||||
if ObjectId::from_str(&username).is_ok() {
|
||||
return Err(anyhow!("username cannot be valid ObjectId"));
|
||||
return Err(anyhow!("Username cannot be valid ObjectId"));
|
||||
}
|
||||
|
||||
if password.is_empty() {
|
||||
return Err(anyhow!("Password cannot be empty string"));
|
||||
}
|
||||
|
||||
let password = bcrypt::hash(password, BCRYPT_COST)
|
||||
.context("failed to hash password")?;
|
||||
|
||||
let no_users_exist = db_client()
|
||||
.await
|
||||
.users
|
||||
.find_one(Document::new())
|
||||
.await?
|
||||
.is_none();
|
||||
let no_users_exist =
|
||||
db_client().users.find_one(Document::new()).await?.is_none();
|
||||
|
||||
if !no_users_exist && core_config.disable_user_registration {
|
||||
return Err(anyhow!("User registration is disabled"));
|
||||
}
|
||||
|
||||
let ts = unix_timestamp_ms() as i64;
|
||||
|
||||
let user = User {
|
||||
id: Default::default(),
|
||||
username,
|
||||
enabled: no_users_exist,
|
||||
enabled: no_users_exist || core_config.enable_new_users,
|
||||
admin: no_users_exist,
|
||||
super_admin: no_users_exist,
|
||||
create_server_permissions: no_users_exist,
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
@@ -68,7 +75,6 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
|
||||
};
|
||||
|
||||
let user_id = db_client()
|
||||
.await
|
||||
.users
|
||||
.insert_one(user)
|
||||
.await
|
||||
@@ -98,7 +104,6 @@ impl Resolve<LoginLocalUser, HeaderMap> for State {
|
||||
}
|
||||
|
||||
let user = db_client()
|
||||
.await
|
||||
.users
|
||||
.find_one(doc! { "username": &username })
|
||||
.await
|
||||
|
||||
@@ -5,9 +5,8 @@ use axum::{
|
||||
extract::Request, http::HeaderMap, middleware::Next,
|
||||
response::Response,
|
||||
};
|
||||
use monitor_client::entities::{monitor_timestamp, user::User};
|
||||
use komodo_client::entities::{komodo_timestamp, user::User};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
@@ -22,14 +21,15 @@ use self::jwt::JwtClaims;
|
||||
pub mod github;
|
||||
pub mod google;
|
||||
pub mod jwt;
|
||||
pub mod oidc;
|
||||
|
||||
mod local;
|
||||
|
||||
const STATE_PREFIX_LENGTH: usize = 20;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct RedirectQuery {
|
||||
pub redirect: Option<String>,
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct RedirectQuery {
|
||||
redirect: Option<String>,
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
@@ -45,14 +45,6 @@ pub async fn auth_request(
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
pub fn random_string(length: usize) -> String {
|
||||
thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(length)
|
||||
.map(char::from)
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn get_user_id_from_headers(
|
||||
headers: &HeaderMap,
|
||||
@@ -125,13 +117,12 @@ pub async fn auth_api_key_get_user_id(
|
||||
secret: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let key = db_client()
|
||||
.await
|
||||
.api_keys
|
||||
.find_one(doc! { "key": key })
|
||||
.await
|
||||
.context("failed to query db")?
|
||||
.context("no api key matching key")?;
|
||||
if key.expires != 0 && key.expires < monitor_timestamp() {
|
||||
if key.expires != 0 && key.expires < komodo_timestamp() {
|
||||
return Err(anyhow!("api key expired"));
|
||||
}
|
||||
if bcrypt::verify(secret, &key.secret)
|
||||
|
||||
67
bin/core/src/auth/oidc/client.rs
Normal file
67
bin/core/src/auth/oidc/client.rs
Normal file
@@ -0,0 +1,67 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::Context;
|
||||
use openidconnect::{
|
||||
core::{CoreClient, CoreProviderMetadata},
|
||||
reqwest::async_http_client,
|
||||
ClientId, ClientSecret, IssuerUrl, RedirectUrl,
|
||||
};
|
||||
|
||||
use crate::config::core_config;
|
||||
|
||||
static DEFAULT_OIDC_CLIENT: OnceLock<Option<CoreClient>> =
|
||||
OnceLock::new();
|
||||
|
||||
pub fn default_oidc_client() -> Option<&'static CoreClient> {
|
||||
DEFAULT_OIDC_CLIENT
|
||||
.get()
|
||||
.expect("OIDC client get before init")
|
||||
.as_ref()
|
||||
}
|
||||
|
||||
pub async fn init_default_oidc_client() {
|
||||
let config = core_config();
|
||||
if !config.oidc_enabled
|
||||
|| config.oidc_provider.is_empty()
|
||||
|| config.oidc_client_id.is_empty()
|
||||
|| config.oidc_client_secret.is_empty()
|
||||
{
|
||||
DEFAULT_OIDC_CLIENT
|
||||
.set(None)
|
||||
.expect("Default OIDC client initialized twice");
|
||||
return;
|
||||
}
|
||||
async {
|
||||
// Use OpenID Connect Discovery to fetch the provider metadata.
|
||||
let provider_metadata = CoreProviderMetadata::discover_async(
|
||||
IssuerUrl::new(config.oidc_provider.clone())?,
|
||||
async_http_client,
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"Failed to get OIDC /.well-known/openid-configuration",
|
||||
)?;
|
||||
|
||||
// Create an OpenID Connect client by specifying the client ID, client secret, authorization URL
|
||||
// and token URL.
|
||||
let client = CoreClient::from_provider_metadata(
|
||||
provider_metadata,
|
||||
ClientId::new(config.oidc_client_id.to_string()),
|
||||
Some(ClientSecret::new(config.oidc_client_secret.to_string())),
|
||||
)
|
||||
// Set the URL the user will be redirected to after the authorization process.
|
||||
.set_redirect_uri(RedirectUrl::new(format!(
|
||||
"{}/auth/oidc/callback",
|
||||
core_config().host
|
||||
))?);
|
||||
|
||||
DEFAULT_OIDC_CLIENT
|
||||
.set(Some(client))
|
||||
.expect("Default OIDC client initialized twice");
|
||||
|
||||
anyhow::Ok(())
|
||||
}
|
||||
.await
|
||||
.context("Failed to init default OIDC client")
|
||||
.unwrap();
|
||||
}
|
||||
267
bin/core/src/auth/oidc/mod.rs
Normal file
267
bin/core/src/auth/oidc/mod.rs
Normal file
@@ -0,0 +1,267 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{
|
||||
extract::Query, response::Redirect, routing::get, Router,
|
||||
};
|
||||
use client::default_oidc_client;
|
||||
use dashmap::DashMap;
|
||||
use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, Document};
|
||||
use openidconnect::{
|
||||
core::CoreAuthenticationFlow, AccessTokenHash, AuthorizationCode,
|
||||
CsrfToken, Nonce, OAuth2TokenResponse, PkceCodeChallenge,
|
||||
PkceCodeVerifier, Scope, TokenResponse,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
use super::RedirectQuery;
|
||||
|
||||
pub mod client;
|
||||
|
||||
/// CSRF tokens can only be used once from the callback,
|
||||
/// and must be used within this timeframe
|
||||
const CSRF_VALID_FOR_MS: i64 = 120_000; // 2 minutes for user to log in.
|
||||
|
||||
type RedirectUrl = Option<String>;
|
||||
type CsrfMap =
|
||||
DashMap<String, (PkceCodeVerifier, Nonce, RedirectUrl, i64)>;
|
||||
fn csrf_verifier_tokens() -> &'static CsrfMap {
|
||||
static CSRF: OnceLock<CsrfMap> = OnceLock::new();
|
||||
CSRF.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route(
|
||||
"/login",
|
||||
get(|query| async {
|
||||
login(query).await.status_code(StatusCode::UNAUTHORIZED)
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/callback",
|
||||
get(|query| async {
|
||||
callback(query).await.status_code(StatusCode::UNAUTHORIZED)
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(name = "OidcRedirect", level = "debug")]
|
||||
async fn login(
|
||||
Query(RedirectQuery { redirect }): Query<RedirectQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
let client =
|
||||
default_oidc_client().context("OIDC Client not configured")?;
|
||||
|
||||
// Generate a PKCE challenge.
|
||||
let (pkce_challenge, pkce_verifier) =
|
||||
PkceCodeChallenge::new_random_sha256();
|
||||
|
||||
// Generate the authorization URL.
|
||||
let (auth_url, csrf_token, nonce) = client
|
||||
.authorize_url(
|
||||
CoreAuthenticationFlow::AuthorizationCode,
|
||||
CsrfToken::new_random,
|
||||
Nonce::new_random,
|
||||
)
|
||||
.add_scope(Scope::new("openid".to_string()))
|
||||
.add_scope(Scope::new("email".to_string()))
|
||||
.set_pkce_challenge(pkce_challenge)
|
||||
.url();
|
||||
|
||||
// Data inserted here will be matched on callback side for csrf protection.
|
||||
csrf_verifier_tokens().insert(
|
||||
csrf_token.secret().clone(),
|
||||
(
|
||||
pkce_verifier,
|
||||
nonce,
|
||||
redirect,
|
||||
komodo_timestamp() + CSRF_VALID_FOR_MS,
|
||||
),
|
||||
);
|
||||
|
||||
let config = core_config();
|
||||
let redirect = if !config.oidc_redirect.is_empty() {
|
||||
Redirect::to(
|
||||
auth_url
|
||||
.as_str()
|
||||
.replace(&config.oidc_provider, &config.oidc_redirect)
|
||||
.as_str(),
|
||||
)
|
||||
} else {
|
||||
Redirect::to(auth_url.as_str())
|
||||
};
|
||||
|
||||
Ok(redirect)
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct CallbackQuery {
|
||||
state: Option<String>,
|
||||
code: Option<String>,
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[instrument(name = "OidcCallback", level = "debug")]
|
||||
async fn callback(
|
||||
Query(query): Query<CallbackQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
let client =
|
||||
default_oidc_client().context("OIDC Client not configured")?;
|
||||
|
||||
if let Some(e) = query.error {
|
||||
return Err(anyhow!("Provider returned error: {e}"));
|
||||
}
|
||||
|
||||
let code = query.code.context("Provider did not return code")?;
|
||||
let state = CsrfToken::new(
|
||||
query.state.context("Provider did not return state")?,
|
||||
);
|
||||
|
||||
let (_, (pkce_verifier, nonce, redirect, valid_until)) =
|
||||
csrf_verifier_tokens()
|
||||
.remove(state.secret())
|
||||
.context("CSRF Token invalid")?;
|
||||
|
||||
if komodo_timestamp() > valid_until {
|
||||
return Err(anyhow!(
|
||||
"CSRF token invalid (Timed out). The token must be "
|
||||
));
|
||||
}
|
||||
|
||||
let token_response = client
|
||||
.exchange_code(AuthorizationCode::new(code))
|
||||
// Set the PKCE code verifier.
|
||||
.set_pkce_verifier(pkce_verifier)
|
||||
.request_async(openidconnect::reqwest::async_http_client)
|
||||
.await
|
||||
.context("Failed to get Oauth token")?;
|
||||
|
||||
// Extract the ID token claims after verifying its authenticity and nonce.
|
||||
let id_token = token_response
|
||||
.id_token()
|
||||
.context("OIDC Server did not return an ID token")?;
|
||||
|
||||
// Some providers attach additional audiences, they must be added here
|
||||
// so token verification succeeds.
|
||||
let verifier = client.id_token_verifier();
|
||||
let additional_audiences = &core_config().oidc_additional_audiences;
|
||||
let verifier = if additional_audiences.is_empty() {
|
||||
verifier
|
||||
} else {
|
||||
verifier.set_other_audience_verifier_fn(|aud| {
|
||||
additional_audiences.contains(aud)
|
||||
})
|
||||
};
|
||||
|
||||
let claims = id_token
|
||||
.claims(&verifier, &nonce)
|
||||
.context("Failed to verify token claims")?;
|
||||
|
||||
// Verify the access token hash to ensure that the access token hasn't been substituted for
|
||||
// another user's.
|
||||
if let Some(expected_access_token_hash) = claims.access_token_hash()
|
||||
{
|
||||
let actual_access_token_hash = AccessTokenHash::from_token(
|
||||
token_response.access_token(),
|
||||
&id_token.signing_alg()?,
|
||||
)?;
|
||||
if actual_access_token_hash != *expected_access_token_hash {
|
||||
return Err(anyhow!("Invalid access token"));
|
||||
}
|
||||
}
|
||||
|
||||
let user_id = claims.subject().as_str();
|
||||
|
||||
let db_client = db_client();
|
||||
let user = db_client
|
||||
.users
|
||||
.find_one(doc! {
|
||||
"config.data.provider": &core_config().oidc_provider,
|
||||
"config.data.user_id": user_id
|
||||
})
|
||||
.await
|
||||
.context("failed at find user query from database")?;
|
||||
|
||||
let jwt = match user {
|
||||
Some(user) => jwt_client()
|
||||
.generate(user.id)
|
||||
.context("failed to generate jwt")?,
|
||||
None => {
|
||||
let ts = komodo_timestamp();
|
||||
let no_users_exist =
|
||||
db_client.users.find_one(Document::new()).await?.is_none();
|
||||
let core_config = core_config();
|
||||
if !no_users_exist && core_config.disable_user_registration {
|
||||
return Err(anyhow!("User registration is disabled"));
|
||||
}
|
||||
// Will use preferred_username, then email, then user_id if it isn't available.
|
||||
let username = claims
|
||||
.preferred_username()
|
||||
.map(|username| username.to_string())
|
||||
.unwrap_or_else(|| {
|
||||
let email = claims
|
||||
.email()
|
||||
.map(|email| email.as_str())
|
||||
.unwrap_or(user_id);
|
||||
if core_config.oidc_use_full_email {
|
||||
email
|
||||
} else {
|
||||
email
|
||||
.split_once('@')
|
||||
.map(|(username, _)| username)
|
||||
.unwrap_or(email)
|
||||
}
|
||||
.to_string()
|
||||
});
|
||||
let user = User {
|
||||
id: Default::default(),
|
||||
username,
|
||||
enabled: no_users_exist || core_config.enable_new_users,
|
||||
admin: no_users_exist,
|
||||
super_admin: no_users_exist,
|
||||
create_server_permissions: no_users_exist,
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
last_update_view: 0,
|
||||
recents: Default::default(),
|
||||
all: Default::default(),
|
||||
config: UserConfig::Oidc {
|
||||
provider: core_config.oidc_provider.clone(),
|
||||
user_id: user_id.to_string(),
|
||||
},
|
||||
};
|
||||
let user_id = db_client
|
||||
.users
|
||||
.insert_one(user)
|
||||
.await
|
||||
.context("failed to create user on database")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted_id is not ObjectId")?
|
||||
.to_string();
|
||||
jwt_client()
|
||||
.generate(user_id)
|
||||
.context("failed to generate jwt")?
|
||||
}
|
||||
};
|
||||
let exchange_token = jwt_client().create_exchange_token(jwt).await;
|
||||
let redirect_url = if let Some(redirect) = redirect {
|
||||
let splitter = if redirect.contains('?') { '&' } else { '?' };
|
||||
format!("{}{splitter}token={exchange_token}", redirect)
|
||||
} else {
|
||||
format!("{}?token={exchange_token}", core_config().host)
|
||||
};
|
||||
Ok(Redirect::to(&redirect_url))
|
||||
}
|
||||
@@ -12,15 +12,14 @@ use aws_sdk_ec2::{
|
||||
Client,
|
||||
};
|
||||
use base64::Engine;
|
||||
use monitor_client::entities::{
|
||||
alert::{Alert, AlertData},
|
||||
monitor_timestamp,
|
||||
server::stats::SeverityLevel,
|
||||
use komodo_client::entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
komodo_timestamp,
|
||||
server_template::aws::AwsServerTemplateConfig,
|
||||
update::ResourceTarget,
|
||||
ResourceTarget,
|
||||
};
|
||||
|
||||
use crate::{config::core_config, helpers::alert::send_alerts};
|
||||
use crate::{alert::send_alerts, config::core_config};
|
||||
|
||||
const POLL_RATE_SECS: u64 = 2;
|
||||
const MAX_POLL_TRIES: usize = 30;
|
||||
@@ -66,6 +65,7 @@ pub async fn launch_ec2_instance(
|
||||
use_public_ip,
|
||||
user_data,
|
||||
port: _,
|
||||
use_https: _,
|
||||
} = config;
|
||||
let instance_type = handle_unknown_instance_type(
|
||||
InstanceType::from(instance_type.as_str()),
|
||||
@@ -171,7 +171,7 @@ pub async fn terminate_ec2_instance_with_retry(
|
||||
error!("failed to terminate aws instance {instance_id}.");
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts: monitor_timestamp(),
|
||||
ts: komodo_timestamp(),
|
||||
resolved: false,
|
||||
level: SeverityLevel::Critical,
|
||||
target: ResourceTarget::system(),
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use aws_config::{BehaviorVersion, Region};
|
||||
use aws_sdk_ecr::Client as EcrClient;
|
||||
use run_command::async_run_command;
|
||||
|
||||
#[tracing::instrument(skip(access_key_id, secret_access_key))]
|
||||
async fn make_ecr_client(
|
||||
region: String,
|
||||
access_key_id: &str,
|
||||
secret_access_key: &str,
|
||||
) -> EcrClient {
|
||||
std::env::set_var("AWS_ACCESS_KEY_ID", access_key_id);
|
||||
std::env::set_var("AWS_SECRET_ACCESS_KEY", secret_access_key);
|
||||
let region = Region::new(region);
|
||||
let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
|
||||
.region(region)
|
||||
.load()
|
||||
.await;
|
||||
EcrClient::new(&config)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(access_key_id, secret_access_key))]
|
||||
pub async fn maybe_create_repo(
|
||||
repo: &str,
|
||||
region: String,
|
||||
access_key_id: &str,
|
||||
secret_access_key: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
let client =
|
||||
make_ecr_client(region, access_key_id, secret_access_key).await;
|
||||
|
||||
let existing = client
|
||||
.describe_repositories()
|
||||
.send()
|
||||
.await
|
||||
.context("failed to describe existing repositories")?
|
||||
.repositories
|
||||
.unwrap_or_default();
|
||||
|
||||
if existing.iter().any(|r| {
|
||||
if let Some(name) = r.repository_name() {
|
||||
name == repo
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}) {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
client
|
||||
.create_repository()
|
||||
.repository_name(repo)
|
||||
.send()
|
||||
.await
|
||||
.context("failed to create repository")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Gets a token docker login.
|
||||
///
|
||||
/// Requires the aws cli be installed on the host
|
||||
#[tracing::instrument(skip(access_key_id, secret_access_key))]
|
||||
pub async fn get_ecr_token(
|
||||
region: &str,
|
||||
access_key_id: &str,
|
||||
secret_access_key: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let log = async_run_command(&format!(
|
||||
"AWS_ACCESS_KEY_ID={access_key_id} AWS_SECRET_ACCESS_KEY={secret_access_key} aws ecr get-login-password --region {region}"
|
||||
))
|
||||
.await;
|
||||
|
||||
if log.success() {
|
||||
Ok(log.stdout)
|
||||
} else {
|
||||
Err(
|
||||
anyhow!("stdout: {} | stderr: {}", log.stdout, log.stderr)
|
||||
.context("failed to get aws ecr login token"),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,2 +1 @@
|
||||
pub mod ec2;
|
||||
pub mod ecr;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user