mirror of
https://github.com/moghtech/komodo.git
synced 2026-03-15 13:10:49 -05:00
Compare commits
248 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1011ec60ab | ||
|
|
48e17a7c87 | ||
|
|
a94baded55 | ||
|
|
e97c0873cf | ||
|
|
43a0b76811 | ||
|
|
2d2577e5ee | ||
|
|
202ac77de3 | ||
|
|
568c963419 | ||
|
|
5c3294241d | ||
|
|
648a04be88 | ||
|
|
1b5822f649 | ||
|
|
c41a008603 | ||
|
|
603243b0eb | ||
|
|
d09ab36696 | ||
|
|
ad168c87f7 | ||
|
|
914f4c6197 | ||
|
|
c73d918e18 | ||
|
|
9d116f56cb | ||
|
|
8a8dede5db | ||
|
|
d2cecf316c | ||
|
|
cad1ee123e | ||
|
|
6aa801b705 | ||
|
|
078ba59002 | ||
|
|
5eacb7191b | ||
|
|
45eafd10b9 | ||
|
|
42c486807c | ||
|
|
8c31fcff02 | ||
|
|
49f1d40ce8 | ||
|
|
bf85e886bd | ||
|
|
eda0b233ca | ||
|
|
5efb227851 | ||
|
|
1a45fffe75 | ||
|
|
fa72f2e5ef | ||
|
|
c9152db300 | ||
|
|
25fcca7246 | ||
|
|
ac449e38d5 | ||
|
|
d6c66948ba | ||
|
|
b6af790aef | ||
|
|
36a49210a0 | ||
|
|
d2b2aa0550 | ||
|
|
7f4c883416 | ||
|
|
676fb3c732 | ||
|
|
17da4bd2fa | ||
|
|
b44e57bbf6 | ||
|
|
6aa5b5faae | ||
|
|
9565855477 | ||
|
|
3504c083b4 | ||
|
|
5fdaa9a808 | ||
|
|
ec35b14077 | ||
|
|
158f3ad89b | ||
|
|
7257ecbaed | ||
|
|
a2a94f23ee | ||
|
|
03cad5b23b | ||
|
|
2460b5edf7 | ||
|
|
83fdb180aa | ||
|
|
9b1d32ebdf | ||
|
|
ea4ae7651c | ||
|
|
5f6fabd925 | ||
|
|
38d9495ab1 | ||
|
|
46ad5b3953 | ||
|
|
e60b817208 | ||
|
|
0ce5248292 | ||
|
|
050c29f4a3 | ||
|
|
8580728933 | ||
|
|
3c5868d111 | ||
|
|
40e1b1ff88 | ||
|
|
99641b2e39 | ||
|
|
f0e7757eb4 | ||
|
|
f7283b1fc1 | ||
|
|
771af21eae | ||
|
|
0dda791ec7 | ||
|
|
bc76b1c07e | ||
|
|
8b537924fb | ||
|
|
f5ce3570e4 | ||
|
|
f1e51d275c | ||
|
|
eaa10d96b5 | ||
|
|
037364068d | ||
|
|
2441bc8cbf | ||
|
|
92ac003910 | ||
|
|
693f24763f | ||
|
|
d9d44ceee1 | ||
|
|
30ab8ed17b | ||
|
|
2bf2be54cc | ||
|
|
b7ea680958 | ||
|
|
2a56d09f89 | ||
|
|
2612f742b2 | ||
|
|
29bdf5c71d | ||
|
|
873d9ea433 | ||
|
|
717f3afa89 | ||
|
|
ec31d1af01 | ||
|
|
9e5c52b9a4 | ||
|
|
762873d5be | ||
|
|
67fa512975 | ||
|
|
502dd3a4a8 | ||
|
|
8c22bdd473 | ||
|
|
ba6801da11 | ||
|
|
309802093c | ||
|
|
3d1e3009b3 | ||
|
|
fdc23c2650 | ||
|
|
072ee6834e | ||
|
|
bedbf76349 | ||
|
|
e26d1211cc | ||
|
|
0342ee4dd9 | ||
|
|
669d5c81b4 | ||
|
|
defbab5955 | ||
|
|
9405295e4a | ||
|
|
28c077ed4c | ||
|
|
61406c1b00 | ||
|
|
64638730b9 | ||
|
|
c0942c6d1d | ||
|
|
ff964cd0fe | ||
|
|
d56f632a11 | ||
|
|
a7f22b6cfb | ||
|
|
6053fc1d99 | ||
|
|
573ff1863c | ||
|
|
dd4a9b0cb5 | ||
|
|
d243cf2da7 | ||
|
|
4e06e788ae | ||
|
|
a0f71f8af5 | ||
|
|
fcbb75d0c0 | ||
|
|
0a8419bb13 | ||
|
|
40fe76cf27 | ||
|
|
5594d3c1d9 | ||
|
|
b12aeb259f | ||
|
|
b121b0ac07 | ||
|
|
a9f1d91b1b | ||
|
|
abf48d0243 | ||
|
|
447690d8bf | ||
|
|
a70c0a2697 | ||
|
|
0758e6ff81 | ||
|
|
ea0e059ee1 | ||
|
|
c9e0524794 | ||
|
|
81ceaf1eae | ||
|
|
37c07ff748 | ||
|
|
62e8943ebe | ||
|
|
99ccffbc38 | ||
|
|
84dc29b77f | ||
|
|
81bab4aa50 | ||
|
|
9fa2fd0f58 | ||
|
|
3745967690 | ||
|
|
e8cfc13342 | ||
|
|
ec47bb11ee | ||
|
|
d008c95853 | ||
|
|
4986d70506 | ||
|
|
1372a5fb39 | ||
|
|
f54224650f | ||
|
|
2eee1459e7 | ||
|
|
5a3fd891c4 | ||
|
|
ba3f288c2d | ||
|
|
6d5fd7dc5d | ||
|
|
df3fd7c4e9 | ||
|
|
395f032ee2 | ||
|
|
de2bd800c4 | ||
|
|
75352a91ff | ||
|
|
9b12270d04 | ||
|
|
7fc378798f | ||
|
|
3db2c93303 | ||
|
|
150d6562bf | ||
|
|
c3b549b051 | ||
|
|
931f2bd92d | ||
|
|
6b6324d79c | ||
|
|
2c65d924f9 | ||
|
|
dd1fecf190 | ||
|
|
aa96a37db4 | ||
|
|
ec9e9638f5 | ||
|
|
e33019cab8 | ||
|
|
951cb82e0c | ||
|
|
0643f96053 | ||
|
|
56d835f2d2 | ||
|
|
d8fb8f8649 | ||
|
|
7197d628e5 | ||
|
|
96083178dd | ||
|
|
9d1b705ab1 | ||
|
|
2582bc9ba3 | ||
|
|
44f34b9b40 | ||
|
|
bbb18d8280 | ||
|
|
da95b7d074 | ||
|
|
6b25309aed | ||
|
|
f8e371af31 | ||
|
|
a0f5ae8c7f | ||
|
|
2f371af288 | ||
|
|
76840efddc | ||
|
|
8f01e441a4 | ||
|
|
41a6e0a65a | ||
|
|
40027f7430 | ||
|
|
a2c69aba87 | ||
|
|
a5d3fbedc6 | ||
|
|
b311b11785 | ||
|
|
7a0b29b387 | ||
|
|
d3a87fdb5f | ||
|
|
9b7ab6d98a | ||
|
|
c302e28d86 | ||
|
|
33be989e3a | ||
|
|
c9d65300c9 | ||
|
|
e96b676366 | ||
|
|
0bff4a5e51 | ||
|
|
9b12334922 | ||
|
|
68659630fc | ||
|
|
8b33647620 | ||
|
|
871aba62d5 | ||
|
|
c649094a8a | ||
|
|
e3c11db89e | ||
|
|
c43293109d | ||
|
|
d3e4f9f638 | ||
|
|
eecf583b0e | ||
|
|
a518806d8b | ||
|
|
8a4611c380 | ||
|
|
d679fbe72f | ||
|
|
1cf02bc4b4 | ||
|
|
bf703eef35 | ||
|
|
985058afb0 | ||
|
|
eac1145958 | ||
|
|
1b408d92d9 | ||
|
|
ee95c2c76b | ||
|
|
e83124ebff | ||
|
|
e912ae050a | ||
|
|
99253d6182 | ||
|
|
ef91577ac5 | ||
|
|
b97f9b30b3 | ||
|
|
7cb11dbc5d | ||
|
|
6d815629fc | ||
|
|
f8021d8541 | ||
|
|
1f444fdbc2 | ||
|
|
af76dd1be4 | ||
|
|
5cb91c6f8d | ||
|
|
de5502aec7 | ||
|
|
ef4ae4c5f2 | ||
|
|
866eb6d81b | ||
|
|
58d6c16eea | ||
|
|
ccbf13ae84 | ||
|
|
21f6acd3d7 | ||
|
|
dce59d1383 | ||
|
|
2fb544c3b0 | ||
|
|
1ba288be79 | ||
|
|
1ff21d2986 | ||
|
|
79cc2c1bb7 | ||
|
|
17b2e6660c | ||
|
|
4ef095fe55 | ||
|
|
fb0a7352e3 | ||
|
|
9a087e5975 | ||
|
|
814e47031d | ||
|
|
1304565e40 | ||
|
|
85616d0669 | ||
|
|
feff4647e7 | ||
|
|
549e15bfe2 | ||
|
|
a08baf8432 | ||
|
|
99c47ce133 | ||
|
|
26a4691c0b |
1
.vscode/resolver.code-snippets
vendored
1
.vscode/resolver.code-snippets
vendored
@@ -3,7 +3,6 @@
|
||||
"scope": "rust",
|
||||
"prefix": "resolve",
|
||||
"body": [
|
||||
"#[async_trait]",
|
||||
"impl Resolve<${1}, User> for State {",
|
||||
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
|
||||
"\t\ttodo!()",
|
||||
|
||||
1531
Cargo.lock
generated
1531
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
60
Cargo.toml
60
Cargo.toml
@@ -3,65 +3,71 @@ resolver = "2"
|
||||
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.2.0"
|
||||
version = "1.7.0"
|
||||
edition = "2021"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/mbecker20/monitor"
|
||||
homepage = "https://docs.monitor.mogh.tech"
|
||||
|
||||
[patch.crates-io]
|
||||
monitor_client = { path = "client/core/rs" }
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
monitor_client = { path = "client/core/rs" }
|
||||
monitor_client = "1.6.2"
|
||||
periphery_client = { path = "client/periphery/rs" }
|
||||
command = { path = "lib/command" }
|
||||
logger = { path = "lib/logger" }
|
||||
git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.3.4", features = ["axum"] }
|
||||
serror = { version = "0.4.3", default-features = false }
|
||||
slack = { version = "0.1.0", package = "slack_client_rs" }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
merge_config_files = "0.1.5"
|
||||
termination_signal = "0.1.3"
|
||||
async_timing_util = "0.1.14"
|
||||
partial_derive2 = "0.4.2"
|
||||
derive_variants = "0.1.3"
|
||||
mongo_indexed = "0.2.2"
|
||||
resolver_api = "0.1.9"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "0.3.0"
|
||||
resolver_api = "1.1.0"
|
||||
parse_csl = "0.1.0"
|
||||
mungos = "0.5.4"
|
||||
svi = "0.1.4"
|
||||
mungos = "0.5.6"
|
||||
svi = "1.0.1"
|
||||
|
||||
# ASYNC
|
||||
tokio = { version = "1.37.0", features = ["full"] }
|
||||
tokio = { version = "1.38.0", features = ["full"] }
|
||||
reqwest = { version = "0.12.4", features = ["json"] }
|
||||
tokio-util = "0.7.11"
|
||||
futures = "0.3.30"
|
||||
futures-util = "0.3.30"
|
||||
async-trait = "0.1.80"
|
||||
|
||||
# SERVER
|
||||
axum = { version = "0.7.5", features = ["ws", "json"] }
|
||||
axum-extra = { version = "0.9.3", features = ["typed-header"] }
|
||||
tower = { version = "0.4.13", features = ["timeout"] }
|
||||
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
|
||||
tokio-tungstenite = "0.21.0"
|
||||
tokio-tungstenite = "0.22.0"
|
||||
|
||||
# SER/DE
|
||||
serde = { version = "1.0.201", features = ["derive"] }
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
strum = { version = "0.26.2", features = ["derive"] }
|
||||
serde_json = "1.0.116"
|
||||
toml = "0.8.12"
|
||||
serde_json = "1.0.117"
|
||||
toml = "0.8.13"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.83"
|
||||
thiserror = "1.0.60"
|
||||
anyhow = "1.0.86"
|
||||
thiserror = "1.0.61"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry_sdk = { version = "0.22.1", features = ["rt-tokio"] }
|
||||
opentelemetry_sdk = { version = "0.23.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.18", features = ["json"] }
|
||||
tracing-opentelemetry = "0.23.0"
|
||||
opentelemetry-otlp = "0.15.0"
|
||||
opentelemetry = "0.22.0"
|
||||
tracing-opentelemetry = "0.24.0"
|
||||
opentelemetry-otlp = "0.16.0"
|
||||
opentelemetry = "0.23.0"
|
||||
tracing = "0.1.40"
|
||||
|
||||
# CONFIG
|
||||
@@ -72,11 +78,12 @@ envy = "0.4.2"
|
||||
# CRYPTO
|
||||
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
|
||||
urlencoding = "2.1.3"
|
||||
rand = "0.8.5"
|
||||
jwt = "0.16.0"
|
||||
bcrypt = "0.15.1"
|
||||
base64 = "0.22.1"
|
||||
hmac = "0.12.1"
|
||||
sha2 = "0.10.8"
|
||||
bcrypt = "0.15.1"
|
||||
rand = "0.8.5"
|
||||
jwt = "0.16.0"
|
||||
hex = "0.4.3"
|
||||
|
||||
# SYSTEM
|
||||
@@ -84,10 +91,11 @@ bollard = "0.16.1"
|
||||
sysinfo = "0.30.12"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.3.0"
|
||||
aws-sdk-ec2 = "1.40.0"
|
||||
aws-config = "1.5.0"
|
||||
aws-sdk-ec2 = "1.46.0"
|
||||
|
||||
# MISC
|
||||
derive_builder = "0.20.0"
|
||||
typeshare = "1.0.3"
|
||||
colored = "2.1.0"
|
||||
bson = "2.10.0"
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
[package]
|
||||
name = "alert_logger"
|
||||
name = "alerter"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
4
bin/alerter/README.md
Normal file
4
bin/alerter/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Alerter
|
||||
|
||||
This crate sets up a basic axum server that listens for incoming alert POSTs.
|
||||
It can be used as a monitor alerting endpoint, and serves as a template for other custom alerter implementations.
|
||||
@@ -5,6 +5,8 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "monitor"
|
||||
@@ -13,12 +15,13 @@ path = "src/main.rs"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local client
|
||||
# local
|
||||
monitor_client.workspace = true
|
||||
# mogh
|
||||
partial_derive2.workspace = true
|
||||
# external
|
||||
tracing-subscriber.workspace = true
|
||||
merge_config_files.workspace = true
|
||||
serde_json.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
|
||||
@@ -1,9 +1,17 @@
|
||||
# Monitor CLI
|
||||
|
||||
Monitor CLI is a command line tool to sync monitor resources and execute file defined procedures.
|
||||
Monitor CLI is a tool to sync monitor resources and execute operations.
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
cargo install monitor_cli
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Credentials
|
||||
|
||||
Configure a file `~/.config/monitor/creds.toml` file with contents:
|
||||
```toml
|
||||
url = "https://your.monitor.address"
|
||||
@@ -12,8 +20,13 @@ secret = "YOUR-API-SECRET"
|
||||
```
|
||||
|
||||
Note. You can specify a different creds file by using `--creds ./other/path.toml`.
|
||||
You can also bypass using any file and pass the information using `--url`, `--key`, `--secret`:
|
||||
|
||||
With your creds in place, you can run syncs:
|
||||
```sh
|
||||
monitor --url "https://your.monitor.address" --key "YOUR-API-KEY" --secret "YOUR-API-SECRET" ...
|
||||
```
|
||||
|
||||
### Run Syncs
|
||||
|
||||
```sh
|
||||
## Sync resources in a single file
|
||||
@@ -26,9 +39,54 @@ monitor sync ./resources
|
||||
monitor sync
|
||||
```
|
||||
|
||||
And executions:
|
||||
#### Manual
|
||||
```md
|
||||
Runs syncs on resource files
|
||||
|
||||
Usage: monitor sync [OPTIONS] [PATH]
|
||||
|
||||
Arguments:
|
||||
[PATH] The path of the resource folder / file Folder paths will recursively incorporate all the resources it finds under the folder [default: ./resources]
|
||||
|
||||
Options:
|
||||
--delete Will delete any resources that aren't included in the resource files
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
### Run Executions
|
||||
|
||||
```sh
|
||||
## Execute a TOML defined procedure
|
||||
monitor exec ./execution/execution.toml
|
||||
```
|
||||
# Triggers an example build
|
||||
monitor execute run-build test_build
|
||||
```
|
||||
|
||||
#### Manual
|
||||
```md
|
||||
Runs an execution
|
||||
|
||||
Usage: monitor execute <COMMAND>
|
||||
|
||||
Commands:
|
||||
none The "null" execution. Does nothing
|
||||
run-procedure Runs the target procedure. Response: [Update]
|
||||
run-build Runs the target build. Response: [Update]
|
||||
deploy Deploys the container for the target deployment. Response: [Update]
|
||||
start-container Starts the container for the target deployment. Response: [Update]
|
||||
stop-container Stops the container for the target deployment. Response: [Update]
|
||||
stop-all-containers Stops all deployments on the target server. Response: [Update]
|
||||
remove-container Stops and removes the container for the target deployment. Reponse: [Update]
|
||||
clone-repo Clones the target repo. Response: [Update]
|
||||
pull-repo Pulls the target repo. Response: [Update]
|
||||
prune-networks Prunes the docker networks on the target server. Response: [Update]
|
||||
prune-images Prunes the docker images on the target server. Response: [Update]
|
||||
prune-containers Prunes the docker containers on the target server. Response: [Update]
|
||||
help Print this message or the help of the given subcommand(s)
|
||||
|
||||
Options:
|
||||
-h, --help Print help
|
||||
```
|
||||
|
||||
### --yes
|
||||
|
||||
You can use `--yes` to avoid any human prompt to continue, for use in automated environments.
|
||||
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
[[build]]
|
||||
name = "monitor_core"
|
||||
description = "Public monitor core build"
|
||||
tags = ["monitor"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "bin/core/Dockerfile"
|
||||
|
||||
[[build]]
|
||||
name = "monitor_core_dev"
|
||||
description = ""
|
||||
tags = ["monitor", "dev"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "bin/core/Dockerfile"
|
||||
|
||||
[[build]]
|
||||
name = "monitor_frontend"
|
||||
description = "standalone hosted frontend for monitor.mogh.tech"
|
||||
tags = ["monitor", "frontend"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "frontend/Dockerfile"
|
||||
|
||||
[[build.config.build_args]]
|
||||
variable = "VITE_MONITOR_HOST"
|
||||
value = "https://monitor.api.mogh.tech"
|
||||
|
||||
[[build]]
|
||||
name = "monitor_frontend_dev"
|
||||
description = "standalone hosted frontend for monitor-dev.mogh.tech"
|
||||
tags = ["monitor", "frontend"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "frontend/Dockerfile"
|
||||
|
||||
[[build.config.build_args]]
|
||||
variable = "VITE_MONITOR_HOST"
|
||||
value = "https://monitor-dev.api.mogh.tech"
|
||||
|
||||
## BUILDER
|
||||
|
||||
[[builder]]
|
||||
name = "mogh-builder"
|
||||
description = ""
|
||||
tags = []
|
||||
|
||||
[builder.config]
|
||||
type = "Aws"
|
||||
|
||||
[builder.config.params]
|
||||
region = "us-east-2"
|
||||
instance_type = "c5.2xlarge"
|
||||
volume_gb = 20
|
||||
port = 8120
|
||||
ami_id = "ami-0005a05fa63a080ab"
|
||||
subnet_id = "subnet-02ae5ad480eacc4bc"
|
||||
security_group_ids = ["sg-049d98c819f9ace58", "sg-006c0ca638af8eb44"]
|
||||
key_pair_name = "mogh-key"
|
||||
assign_public_ip = true
|
||||
use_public_ip = false
|
||||
github_accounts = []
|
||||
docker_accounts = []
|
||||
@@ -1,213 +0,0 @@
|
||||
## MONITOR PROXY
|
||||
[[deployment]]
|
||||
name = "monitor-proxy"
|
||||
description = "An NGINX proxy for mogh.tech"
|
||||
tags = ["monitor"]
|
||||
config.server_id = "monitor-01"
|
||||
config.network = "host"
|
||||
config.restart = "on-failure"
|
||||
config.image.type = "Image"
|
||||
config.image.params.image = "jc21/nginx-proxy-manager"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/nginx/data"
|
||||
container = "/data"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/nginx/letsencrypt"
|
||||
container = "/etc/letsencrypt"
|
||||
|
||||
## MONITOR MONGO
|
||||
[[deployment]]
|
||||
name = "monitor-mongo"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "no"
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "mongo"
|
||||
|
||||
## MONITOR CORE
|
||||
[[deployment]]
|
||||
name = "monitor-core"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "no"
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "mbecker2020/monitor_core"
|
||||
|
||||
## GRAFANA
|
||||
[[deployment]]
|
||||
name = "grafana"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "grafana/grafana"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/grafana"
|
||||
container = "/var/lib/grafana"
|
||||
|
||||
[[deployment.config.environment]]
|
||||
variable = "GF_SERVER_HTTP_PORT"
|
||||
value = "3080"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "key-value"
|
||||
|
||||
## LOKI
|
||||
[[deployment]]
|
||||
name = "loki"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "grafana/loki"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/loki"
|
||||
container = "/loki"
|
||||
|
||||
[[deployment]]
|
||||
name = "tempo"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
command = "-server.http-listen-port=3200 -server.grpc-listen-port=9096 --storage.trace.backend=local --storage.trace.local.path=/tmp/tempo/traces --storage.trace.wal.path=/tmp/tempo/wal"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "grafana/tempo"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/tempo"
|
||||
container = "/tmp/tempo"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "key-value"
|
||||
|
||||
## VECTOR
|
||||
[[deployment]]
|
||||
name = "vector"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
command = "--config /etc/vector/*.toml"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "timberio/vector:latest-debian"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/home/ubuntu/.config/vector"
|
||||
container = "/etc/vector"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/vector"
|
||||
container = "/var/lib/vector"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/var/run/docker.sock"
|
||||
container = "/var/run/docker.sock"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "key-value"
|
||||
|
||||
## MONITOR CORE DEV
|
||||
[[deployment]]
|
||||
name = "monitor-core-dev"
|
||||
description = ""
|
||||
tags = ["monitor", "dev"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
redeploy_on_build = true
|
||||
network = "host"
|
||||
restart = "no"
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Build"
|
||||
params.build_id = "monitor_core"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/home/ubuntu/.config/monitor/dev.core.config.toml"
|
||||
container = "/config/config.toml"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/repos/monitor-dev-frontend/frontend/dist"
|
||||
container = "/frontend"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "rust"
|
||||
|
||||
## MONITOR FRONTEND
|
||||
[[deployment]]
|
||||
name = "monitor-frontend"
|
||||
description = ""
|
||||
tags = ["monitor", "frontend"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
redeploy_on_build = true
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
image.type = "Build"
|
||||
image.params.build = "monitor_frontend"
|
||||
|
||||
## MONITOR DEV FRONTEND
|
||||
[[deployment]]
|
||||
name = "monitor-dev-frontend"
|
||||
description = ""
|
||||
tags = ["monitor", "dev", "frontend"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
redeploy_on_build = true
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
image.type = "Build"
|
||||
image.params.build = "monitor_frontend_dev"
|
||||
|
||||
[[deployment.config.environment]]
|
||||
variable = "PORT"
|
||||
value = "4175"
|
||||
@@ -1,8 +0,0 @@
|
||||
[[procedure]]
|
||||
name = "test-procedure"
|
||||
description = ""
|
||||
tags = []
|
||||
|
||||
[procedure.config]
|
||||
procedure_type = "Sequence"
|
||||
executions = []
|
||||
@@ -1,37 +0,0 @@
|
||||
# [[repo]]
|
||||
# name = "monitor-dev-frontend"
|
||||
# description = "Used as frontend for monitor-core-dev"
|
||||
# tags = ["monitor", "dev"]
|
||||
|
||||
# [repo.config]
|
||||
# server_id = "monitor-01"
|
||||
# repo = "mbecker20/monitor"
|
||||
# branch = "main"
|
||||
# github_account = ""
|
||||
|
||||
# [repo.config.on_clone]
|
||||
# path = ""
|
||||
# command = ""
|
||||
|
||||
# [repo.config.on_pull]
|
||||
# path = "frontend"
|
||||
# command = "sh on_pull.sh"
|
||||
|
||||
[[repo]]
|
||||
name = "monitor-periphery"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[repo.config]
|
||||
server_id = "monitor-01"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
github_account = ""
|
||||
|
||||
[repo.config.on_clone]
|
||||
path = ""
|
||||
command = ""
|
||||
|
||||
[repo.config.on_pull]
|
||||
path = "."
|
||||
command = "/root/.cargo/bin/cargo build -p monitor_periphery --release"
|
||||
@@ -1,51 +0,0 @@
|
||||
[[server]]
|
||||
name = "monitor-01"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[server.config]
|
||||
address = "http://localhost:8120"
|
||||
enabled = true
|
||||
stats_monitoring = true
|
||||
auto_prune = true
|
||||
send_unreachable_alerts = true
|
||||
send_cpu_alerts = true
|
||||
send_mem_alerts = true
|
||||
send_disk_alerts = true
|
||||
region = "us-east-2"
|
||||
|
||||
## TEMPLATE
|
||||
|
||||
[[server_template]]
|
||||
name = "mogh-template"
|
||||
description = ""
|
||||
tags = []
|
||||
|
||||
[server_template.config]
|
||||
type = "Aws"
|
||||
|
||||
[server_template.config.params]
|
||||
region = "us-east-2"
|
||||
instance_type = "t3.medium"
|
||||
ami_id = "ami-0005a05fa63a080ab"
|
||||
subnet_id = "subnet-02ae5ad480eacc4bc"
|
||||
key_pair_name = "mogh-key"
|
||||
assign_public_ip = true
|
||||
use_public_ip = false
|
||||
port = 8120
|
||||
user_data = ""
|
||||
security_group_ids = ["sg-049d98c819f9ace58", "sg-006c0ca638af8eb44"]
|
||||
|
||||
[[server_template.config.params.volumes]]
|
||||
device_name = "/dev/sda1"
|
||||
size_gb = 20
|
||||
volume_type = "gp2"
|
||||
iops = 0
|
||||
throughput = 0
|
||||
|
||||
[[server_template.config.params.volumes]]
|
||||
device_name = "/dev/sdb"
|
||||
size_gb = 10
|
||||
volume_type = "gp3"
|
||||
iops = 0
|
||||
throughput = 0
|
||||
66
bin/cli/src/args.rs
Normal file
66
bin/cli/src/args.rs
Normal file
@@ -0,0 +1,66 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use monitor_client::api::execute::Execution;
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
pub struct CliArgs {
|
||||
/// Sync or Exec
|
||||
#[command(subcommand)]
|
||||
pub command: Command,
|
||||
|
||||
/// The path to a creds file.
|
||||
///
|
||||
/// Note: If each of `url`, `key` and `secret` are passed,
|
||||
/// no file is required at this path.
|
||||
#[arg(long, default_value_t = default_creds())]
|
||||
pub creds: String,
|
||||
|
||||
/// Pass url in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub url: Option<String>,
|
||||
/// Pass api key in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub key: Option<String>,
|
||||
/// Pass api secret in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub secret: Option<String>,
|
||||
|
||||
/// Always continue on user confirmation prompts.
|
||||
#[arg(long, short, default_value_t = false)]
|
||||
pub yes: bool,
|
||||
}
|
||||
|
||||
fn default_creds() -> String {
|
||||
let home = std::env::var("HOME")
|
||||
.expect("no HOME env var. cannot get default config path.");
|
||||
format!("{home}/.config/monitor/creds.toml")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
pub enum Command {
|
||||
/// Runs syncs on resource files
|
||||
Sync {
|
||||
/// The path of the resource folder / file
|
||||
/// Folder paths will recursively incorporate all the resources it finds under the folder
|
||||
#[arg(default_value_t = String::from("./resources"))]
|
||||
path: String,
|
||||
|
||||
/// Will delete any resources that aren't included in the resource files.
|
||||
#[arg(long, default_value_t = false)]
|
||||
delete: bool,
|
||||
},
|
||||
|
||||
/// Runs an execution
|
||||
Execute {
|
||||
#[command(subcommand)]
|
||||
execution: Execution,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CredsFile {
|
||||
pub url: String,
|
||||
pub key: String,
|
||||
pub secret: String,
|
||||
}
|
||||
130
bin/cli/src/exec.rs
Normal file
130
bin/cli/src/exec.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::api::execute::Execution;
|
||||
|
||||
use crate::{
|
||||
helpers::wait_for_enter,
|
||||
state::{cli_args, monitor_client},
|
||||
};
|
||||
|
||||
pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
if matches!(execution, Execution::None(_)) {
|
||||
println!("Got 'none' execution. Doing nothing...");
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
println!("Finished doing nothing. Exiting...");
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
println!("\n{}: Execution", "Mode".dimmed());
|
||||
match &execution {
|
||||
Execution::None(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Deploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RemoveContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
}
|
||||
|
||||
if !cli_args().yes {
|
||||
wait_for_enter("run execution")?;
|
||||
}
|
||||
|
||||
info!("Running Execution...");
|
||||
|
||||
let res = match execution {
|
||||
Execution::RunProcedure(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::RunBuild(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::Deploy(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::StartContainer(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::StopContainer(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::StopAllContainers(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::RemoveContainer(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::CloneRepo(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::PullRepo(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::PruneNetworks(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::PruneImages(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::PruneContainers(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::RunSync(request) => {
|
||||
monitor_client().execute(request).await
|
||||
}
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
println!("Finished sleeping!");
|
||||
std::process::exit(0)
|
||||
}
|
||||
Execution::None(_) => unreachable!(),
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(update) => println!("\n{}: {update:#?}", "SUCCESS".green()),
|
||||
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::api::execute;
|
||||
use serde::Deserialize;
|
||||
use strum::Display;
|
||||
|
||||
use crate::monitor_client;
|
||||
|
||||
pub async fn run_execution(path: &Path) -> anyhow::Result<()> {
|
||||
let ExecutionFile { name, stages } = crate::parse_toml_file(path)?;
|
||||
|
||||
info!("EXECUTION: {name}");
|
||||
info!("path: {path:?}");
|
||||
println!("{stages:#?}");
|
||||
|
||||
crate::wait_for_enter("EXECUTE")?;
|
||||
|
||||
run_stages(stages)
|
||||
.await
|
||||
.context("failed during a stage. terminating run.")?;
|
||||
|
||||
info!("finished successfully ✅");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Specifies sequence of stages (build / deploy) on resources
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct ExecutionFile {
|
||||
pub name: String,
|
||||
#[serde(rename = "stage")]
|
||||
pub stages: Vec<Stage>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct Stage {
|
||||
pub name: String,
|
||||
pub action: ExecutionType,
|
||||
/// resource names
|
||||
pub targets: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Deserialize, Display)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[strum(serialize_all = "snake_case")]
|
||||
pub enum ExecutionType {
|
||||
Build,
|
||||
Deploy,
|
||||
StartContainer,
|
||||
StopContainer,
|
||||
DestroyContainer,
|
||||
}
|
||||
|
||||
pub async fn run_stages(stages: Vec<Stage>) -> anyhow::Result<()> {
|
||||
for Stage {
|
||||
name,
|
||||
action,
|
||||
targets,
|
||||
} in stages
|
||||
{
|
||||
info!("running {action} stage: {name}... ⏳");
|
||||
match action {
|
||||
ExecutionType::Build => {
|
||||
trigger_builds_in_parallel(&targets).await?;
|
||||
}
|
||||
ExecutionType::Deploy => {
|
||||
redeploy_deployments_in_parallel(&targets).await?;
|
||||
}
|
||||
ExecutionType::StartContainer => {
|
||||
start_containers_in_parallel(&targets).await?
|
||||
}
|
||||
ExecutionType::StopContainer => {
|
||||
stop_containers_in_parallel(&targets).await?
|
||||
}
|
||||
ExecutionType::DestroyContainer => {
|
||||
destroy_containers_in_parallel(&targets).await?;
|
||||
}
|
||||
}
|
||||
info!("finished {action} stage: {name} ✅");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn redeploy_deployments_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::Deploy { deployment: deployment.to_string(), stop_signal: None, stop_time: None })
|
||||
.await
|
||||
.with_context(|| format!("failed to deploy {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to deploy {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn start_containers_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::StartContainer { deployment: deployment.to_string() })
|
||||
.await
|
||||
.with_context(|| format!("failed to start container {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to start container {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn stop_containers_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::StopContainer { deployment: deployment.to_string(), signal: None, time: None })
|
||||
.await
|
||||
.with_context(|| format!("failed to stop container {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to stop container {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn destroy_containers_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::RemoveContainer { deployment: deployment.to_string(), signal: None, time: None })
|
||||
.await
|
||||
.with_context(|| format!("failed to destroy container {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to destroy container {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn trigger_builds_in_parallel(
|
||||
builds: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = builds.iter().map(|build| async move {
|
||||
monitor_client()
|
||||
.execute(execute::RunBuild { build: build.to_string() })
|
||||
.await
|
||||
.with_context(|| format!("failed to build {build}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to build {build}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
17
bin/cli/src/helpers.rs
Normal file
17
bin/cli/src/helpers.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
|
||||
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,113 +1,32 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use std::{io::Read, path::PathBuf, str::FromStr, sync::OnceLock};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::{Parser, Subcommand};
|
||||
use colored::Colorize;
|
||||
use monitor_client::{api::read, MonitorClient};
|
||||
use serde::{de::DeserializeOwned, Deserialize};
|
||||
use monitor_client::api::read::GetVersion;
|
||||
|
||||
mod execution;
|
||||
mod args;
|
||||
mod exec;
|
||||
mod helpers;
|
||||
mod maps;
|
||||
mod state;
|
||||
mod sync;
|
||||
|
||||
fn cli_args() -> &'static CliArgs {
|
||||
static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(CliArgs::parse)
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
struct CliArgs {
|
||||
/// Sync or Exec
|
||||
#[command(subcommand)]
|
||||
command: Command,
|
||||
/// The path to a creds file.
|
||||
#[arg(long, default_value_t = default_creds())]
|
||||
creds: String,
|
||||
/// Log less (just resource names).
|
||||
#[arg(long, default_value_t = false)]
|
||||
quiet: bool,
|
||||
}
|
||||
|
||||
fn default_creds() -> String {
|
||||
let home = std::env::var("HOME")
|
||||
.expect("no HOME env var. cannot get default config path.");
|
||||
format!("{home}/.config/monitor/creds.toml")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
enum Command {
|
||||
/// Runs syncs on resource files
|
||||
Sync {
|
||||
/// The path of the resource folder / file
|
||||
/// Folder paths will recursively incorporate all the resources it finds under the folder
|
||||
#[arg(default_value_t = String::from("./resources"))]
|
||||
path: String,
|
||||
},
|
||||
|
||||
/// Runs execution files
|
||||
Exec {
|
||||
/// The path of the exec file
|
||||
path: PathBuf,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
struct CredsFile {
|
||||
url: String,
|
||||
key: String,
|
||||
secret: String,
|
||||
}
|
||||
|
||||
fn monitor_client() -> &'static MonitorClient {
|
||||
static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
|
||||
MONITOR_CLIENT.get_or_init(|| {
|
||||
let CredsFile { url, key, secret } =
|
||||
parse_toml_file(&cli_args().creds)
|
||||
.expect("failed to parse monitor credentials");
|
||||
futures::executor::block_on(MonitorClient::new(url, key, secret))
|
||||
.expect("failed to initialize monitor client")
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt::init();
|
||||
tracing_subscriber::fmt().with_target(false).init();
|
||||
|
||||
let version =
|
||||
monitor_client().read(read::GetVersion {}).await?.version;
|
||||
info!("monitor version: {version}");
|
||||
state::monitor_client().read(GetVersion {}).await?.version;
|
||||
info!("monitor version: {}", version.to_string().blue().bold());
|
||||
|
||||
match &cli_args().command {
|
||||
Command::Exec { path } => execution::run_execution(path).await?,
|
||||
Command::Sync { path } => {
|
||||
sync::run_sync(&PathBuf::from_str(path)?).await?
|
||||
match &state::cli_args().command {
|
||||
args::Command::Sync { path, delete } => {
|
||||
sync::run(path, *delete).await?
|
||||
}
|
||||
args::Command::Execute { execution } => {
|
||||
exec::run(execution.to_owned()).await?
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_toml_file<T: DeserializeOwned>(
|
||||
path: impl AsRef<std::path::Path>,
|
||||
) -> anyhow::Result<T> {
|
||||
let contents = std::fs::read_to_string(path)
|
||||
.context("failed to read file contents")?;
|
||||
toml::from_str(&contents).context("failed to parse toml contents")
|
||||
}
|
||||
|
||||
fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -3,22 +3,22 @@ use std::{collections::HashMap, sync::OnceLock};
|
||||
use monitor_client::{
|
||||
api::read,
|
||||
entities::{
|
||||
alerter::AlerterListItem, build::BuildListItem,
|
||||
builder::BuilderListItem, deployment::DeploymentListItem,
|
||||
procedure::ProcedureListItem, repo::RepoListItem,
|
||||
server::ServerListItem, server_template::ServerTemplateListItem,
|
||||
tag::Tag, user::User, user_group::UserGroup,
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
sync::ResourceSync, tag::Tag, user::User, user_group::UserGroup,
|
||||
variable::Variable,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::monitor_client;
|
||||
use crate::state::monitor_client;
|
||||
|
||||
pub fn name_to_build() -> &'static HashMap<String, BuildListItem> {
|
||||
static NAME_TO_BUILD: OnceLock<HashMap<String, BuildListItem>> =
|
||||
pub fn name_to_build() -> &'static HashMap<String, Build> {
|
||||
static NAME_TO_BUILD: OnceLock<HashMap<String, Build>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_BUILD.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilds::default()),
|
||||
monitor_client().read(read::ListFullBuilds::default()),
|
||||
)
|
||||
.expect("failed to get builds from monitor")
|
||||
.into_iter()
|
||||
@@ -27,12 +27,12 @@ pub fn name_to_build() -> &'static HashMap<String, BuildListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_build() -> &'static HashMap<String, BuildListItem> {
|
||||
static ID_TO_BUILD: OnceLock<HashMap<String, BuildListItem>> =
|
||||
pub fn id_to_build() -> &'static HashMap<String, Build> {
|
||||
static ID_TO_BUILD: OnceLock<HashMap<String, Build>> =
|
||||
OnceLock::new();
|
||||
ID_TO_BUILD.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilds::default()),
|
||||
monitor_client().read(read::ListFullBuilds::default()),
|
||||
)
|
||||
.expect("failed to get builds from monitor")
|
||||
.into_iter()
|
||||
@@ -41,14 +41,12 @@ pub fn id_to_build() -> &'static HashMap<String, BuildListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_deployment(
|
||||
) -> &'static HashMap<String, DeploymentListItem> {
|
||||
static NAME_TO_DEPLOYMENT: OnceLock<
|
||||
HashMap<String, DeploymentListItem>,
|
||||
> = OnceLock::new();
|
||||
pub fn name_to_deployment() -> &'static HashMap<String, Deployment> {
|
||||
static NAME_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_DEPLOYMENT.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListDeployments::default()),
|
||||
monitor_client().read(read::ListFullDeployments::default()),
|
||||
)
|
||||
.expect("failed to get deployments from monitor")
|
||||
.into_iter()
|
||||
@@ -57,14 +55,12 @@ pub fn name_to_deployment(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_deployment(
|
||||
) -> &'static HashMap<String, DeploymentListItem> {
|
||||
static ID_TO_DEPLOYMENT: OnceLock<
|
||||
HashMap<String, DeploymentListItem>,
|
||||
> = OnceLock::new();
|
||||
pub fn id_to_deployment() -> &'static HashMap<String, Deployment> {
|
||||
static ID_TO_DEPLOYMENT: OnceLock<HashMap<String, Deployment>> =
|
||||
OnceLock::new();
|
||||
ID_TO_DEPLOYMENT.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListDeployments::default()),
|
||||
monitor_client().read(read::ListFullDeployments::default()),
|
||||
)
|
||||
.expect("failed to get deployments from monitor")
|
||||
.into_iter()
|
||||
@@ -73,12 +69,12 @@ pub fn id_to_deployment(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_server() -> &'static HashMap<String, ServerListItem> {
|
||||
static NAME_TO_SERVER: OnceLock<HashMap<String, ServerListItem>> =
|
||||
pub fn name_to_server() -> &'static HashMap<String, Server> {
|
||||
static NAME_TO_SERVER: OnceLock<HashMap<String, Server>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_SERVER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServers::default()),
|
||||
monitor_client().read(read::ListFullServers::default()),
|
||||
)
|
||||
.expect("failed to get servers from monitor")
|
||||
.into_iter()
|
||||
@@ -87,12 +83,12 @@ pub fn name_to_server() -> &'static HashMap<String, ServerListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_server() -> &'static HashMap<String, ServerListItem> {
|
||||
static ID_TO_SERVER: OnceLock<HashMap<String, ServerListItem>> =
|
||||
pub fn id_to_server() -> &'static HashMap<String, Server> {
|
||||
static ID_TO_SERVER: OnceLock<HashMap<String, Server>> =
|
||||
OnceLock::new();
|
||||
ID_TO_SERVER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServers::default()),
|
||||
monitor_client().read(read::ListFullServers::default()),
|
||||
)
|
||||
.expect("failed to get servers from monitor")
|
||||
.into_iter()
|
||||
@@ -101,13 +97,12 @@ pub fn id_to_server() -> &'static HashMap<String, ServerListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_builder() -> &'static HashMap<String, BuilderListItem>
|
||||
{
|
||||
static NAME_TO_BUILDER: OnceLock<HashMap<String, BuilderListItem>> =
|
||||
pub fn name_to_builder() -> &'static HashMap<String, Builder> {
|
||||
static NAME_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_BUILDER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilders::default()),
|
||||
monitor_client().read(read::ListFullBuilders::default()),
|
||||
)
|
||||
.expect("failed to get builders from monitor")
|
||||
.into_iter()
|
||||
@@ -116,12 +111,12 @@ pub fn name_to_builder() -> &'static HashMap<String, BuilderListItem>
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_builder() -> &'static HashMap<String, BuilderListItem> {
|
||||
static ID_TO_BUILDER: OnceLock<HashMap<String, BuilderListItem>> =
|
||||
pub fn id_to_builder() -> &'static HashMap<String, Builder> {
|
||||
static ID_TO_BUILDER: OnceLock<HashMap<String, Builder>> =
|
||||
OnceLock::new();
|
||||
ID_TO_BUILDER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilders::default()),
|
||||
monitor_client().read(read::ListFullBuilders::default()),
|
||||
)
|
||||
.expect("failed to get builders from monitor")
|
||||
.into_iter()
|
||||
@@ -130,13 +125,12 @@ pub fn id_to_builder() -> &'static HashMap<String, BuilderListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_alerter() -> &'static HashMap<String, AlerterListItem>
|
||||
{
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, AlerterListItem>> =
|
||||
pub fn name_to_alerter() -> &'static HashMap<String, Alerter> {
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListAlerters::default()),
|
||||
monitor_client().read(read::ListFullAlerters::default()),
|
||||
)
|
||||
.expect("failed to get alerters from monitor")
|
||||
.into_iter()
|
||||
@@ -145,12 +139,12 @@ pub fn name_to_alerter() -> &'static HashMap<String, AlerterListItem>
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_alerter() -> &'static HashMap<String, AlerterListItem> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, AlerterListItem>> =
|
||||
pub fn id_to_alerter() -> &'static HashMap<String, Alerter> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, Alerter>> =
|
||||
OnceLock::new();
|
||||
ID_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListAlerters::default()),
|
||||
monitor_client().read(read::ListFullAlerters::default()),
|
||||
)
|
||||
.expect("failed to get alerters from monitor")
|
||||
.into_iter()
|
||||
@@ -159,12 +153,12 @@ pub fn id_to_alerter() -> &'static HashMap<String, AlerterListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_repo() -> &'static HashMap<String, RepoListItem> {
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, RepoListItem>> =
|
||||
pub fn name_to_repo() -> &'static HashMap<String, Repo> {
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListRepos::default()),
|
||||
monitor_client().read(read::ListFullRepos::default()),
|
||||
)
|
||||
.expect("failed to get repos from monitor")
|
||||
.into_iter()
|
||||
@@ -173,12 +167,12 @@ pub fn name_to_repo() -> &'static HashMap<String, RepoListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_repo() -> &'static HashMap<String, RepoListItem> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, RepoListItem>> =
|
||||
pub fn id_to_repo() -> &'static HashMap<String, Repo> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, Repo>> =
|
||||
OnceLock::new();
|
||||
ID_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListRepos::default()),
|
||||
monitor_client().read(read::ListFullRepos::default()),
|
||||
)
|
||||
.expect("failed to get repos from monitor")
|
||||
.into_iter()
|
||||
@@ -187,14 +181,12 @@ pub fn id_to_repo() -> &'static HashMap<String, RepoListItem> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_procedure(
|
||||
) -> &'static HashMap<String, ProcedureListItem> {
|
||||
static NAME_TO_PROCEDURE: OnceLock<
|
||||
HashMap<String, ProcedureListItem>,
|
||||
> = OnceLock::new();
|
||||
pub fn name_to_procedure() -> &'static HashMap<String, Procedure> {
|
||||
static NAME_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_PROCEDURE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListProcedures::default()),
|
||||
monitor_client().read(read::ListFullProcedures::default()),
|
||||
)
|
||||
.expect("failed to get procedures from monitor")
|
||||
.into_iter()
|
||||
@@ -203,14 +195,12 @@ pub fn name_to_procedure(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_procedure() -> &'static HashMap<String, ProcedureListItem>
|
||||
{
|
||||
static ID_TO_PROCEDURE: OnceLock<
|
||||
HashMap<String, ProcedureListItem>,
|
||||
> = OnceLock::new();
|
||||
pub fn id_to_procedure() -> &'static HashMap<String, Procedure> {
|
||||
static ID_TO_PROCEDURE: OnceLock<HashMap<String, Procedure>> =
|
||||
OnceLock::new();
|
||||
ID_TO_PROCEDURE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListProcedures::default()),
|
||||
monitor_client().read(read::ListFullProcedures::default()),
|
||||
)
|
||||
.expect("failed to get procedures from monitor")
|
||||
.into_iter()
|
||||
@@ -220,13 +210,13 @@ pub fn id_to_procedure() -> &'static HashMap<String, ProcedureListItem>
|
||||
}
|
||||
|
||||
pub fn name_to_server_template(
|
||||
) -> &'static HashMap<String, ServerTemplateListItem> {
|
||||
) -> &'static HashMap<String, ServerTemplate> {
|
||||
static NAME_TO_SERVER_TEMPLATE: OnceLock<
|
||||
HashMap<String, ServerTemplateListItem>,
|
||||
HashMap<String, ServerTemplate>,
|
||||
> = OnceLock::new();
|
||||
NAME_TO_SERVER_TEMPLATE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServerTemplates::default()),
|
||||
monitor_client().read(read::ListFullServerTemplates::default()),
|
||||
)
|
||||
.expect("failed to get server templates from monitor")
|
||||
.into_iter()
|
||||
@@ -236,13 +226,13 @@ pub fn name_to_server_template(
|
||||
}
|
||||
|
||||
pub fn id_to_server_template(
|
||||
) -> &'static HashMap<String, ServerTemplateListItem> {
|
||||
) -> &'static HashMap<String, ServerTemplate> {
|
||||
static ID_TO_SERVER_TEMPLATE: OnceLock<
|
||||
HashMap<String, ServerTemplateListItem>,
|
||||
HashMap<String, ServerTemplate>,
|
||||
> = OnceLock::new();
|
||||
ID_TO_SERVER_TEMPLATE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServerTemplates::default()),
|
||||
monitor_client().read(read::ListFullServerTemplates::default()),
|
||||
)
|
||||
.expect("failed to get server templates from monitor")
|
||||
.into_iter()
|
||||
@@ -251,6 +241,36 @@ pub fn id_to_server_template(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_resource_sync(
|
||||
) -> &'static HashMap<String, ResourceSync> {
|
||||
static NAME_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_SYNC.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullResourceSyncs::default()),
|
||||
)
|
||||
.expect("failed to get syncs from monitor")
|
||||
.into_iter()
|
||||
.map(|sync| (sync.name.clone(), sync))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_resource_sync() -> &'static HashMap<String, ResourceSync>
|
||||
{
|
||||
static ID_TO_SYNC: OnceLock<HashMap<String, ResourceSync>> =
|
||||
OnceLock::new();
|
||||
ID_TO_SYNC.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListFullResourceSyncs::default()),
|
||||
)
|
||||
.expect("failed to get syncs from monitor")
|
||||
.into_iter()
|
||||
.map(|sync| (sync.id.clone(), sync))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
|
||||
static NAME_TO_USER_GROUP: OnceLock<HashMap<String, UserGroup>> =
|
||||
OnceLock::new();
|
||||
@@ -265,6 +285,21 @@ pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_variable() -> &'static HashMap<String, Variable> {
|
||||
static NAME_TO_VARIABLE: OnceLock<HashMap<String, Variable>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_VARIABLE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListVariables::default()),
|
||||
)
|
||||
.expect("failed to get user groups from monitor")
|
||||
.variables
|
||||
.into_iter()
|
||||
.map(|variable| (variable.name.clone(), variable))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_user() -> &'static HashMap<String, User> {
|
||||
static ID_TO_USER: OnceLock<HashMap<String, User>> =
|
||||
OnceLock::new();
|
||||
|
||||
46
bin/cli/src/state.rs
Normal file
46
bin/cli/src/state.rs
Normal file
@@ -0,0 +1,46 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use clap::Parser;
|
||||
use merge_config_files::parse_config_file;
|
||||
use monitor_client::MonitorClient;
|
||||
|
||||
pub fn cli_args() -> &'static crate::args::CliArgs {
|
||||
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn monitor_client() -> &'static MonitorClient {
|
||||
static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
|
||||
MONITOR_CLIENT.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let crate::args::CredsFile { url, key, secret } =
|
||||
match (&args.url, &args.key, &args.secret) {
|
||||
(Some(url), Some(key), Some(secret)) => {
|
||||
crate::args::CredsFile {
|
||||
url: url.clone(),
|
||||
key: key.clone(),
|
||||
secret: secret.clone(),
|
||||
}
|
||||
}
|
||||
(url, key, secret) => {
|
||||
let mut creds: crate::args::CredsFile =
|
||||
parse_config_file(cli_args().creds.as_str())
|
||||
.expect("failed to parse monitor credentials");
|
||||
|
||||
if let Some(url) = url {
|
||||
creds.url.clone_from(url);
|
||||
}
|
||||
if let Some(key) = key {
|
||||
creds.key.clone_from(key);
|
||||
}
|
||||
if let Some(secret) = secret {
|
||||
creds.secret.clone_from(secret);
|
||||
}
|
||||
|
||||
creds
|
||||
}
|
||||
};
|
||||
futures::executor::block_on(MonitorClient::new(url, key, secret))
|
||||
.expect("failed to initialize monitor client")
|
||||
})
|
||||
}
|
||||
@@ -1,11 +1,19 @@
|
||||
use std::{fs, path::Path};
|
||||
use std::{
|
||||
fs,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use colored::Colorize;
|
||||
use monitor_client::entities::toml::ResourcesToml;
|
||||
use serde::de::DeserializeOwned;
|
||||
|
||||
pub fn read_resources(path: &Path) -> anyhow::Result<ResourcesToml> {
|
||||
pub fn read_resources(path: &str) -> anyhow::Result<ResourcesToml> {
|
||||
let mut res = ResourcesToml::default();
|
||||
read_resources_recursive(path, &mut res)?;
|
||||
let path =
|
||||
PathBuf::from_str(path).context("invalid resources path")?;
|
||||
read_resources_recursive(&path, &mut res)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
@@ -23,23 +31,29 @@ fn read_resources_recursive(
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
let more = match crate::parse_toml_file::<ResourcesToml>(path) {
|
||||
let more = match parse_toml_file::<ResourcesToml>(path) {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
warn!("failed to parse {:?}. skipping file | {e:#}", path);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
info!("adding resources from {path:?}");
|
||||
resources.server_templates.extend(more.server_templates);
|
||||
info!(
|
||||
"{} from {}",
|
||||
"adding resources".green().bold(),
|
||||
path.display().to_string().blue().bold()
|
||||
);
|
||||
resources.servers.extend(more.servers);
|
||||
resources.builds.extend(more.builds);
|
||||
resources.deployments.extend(more.deployments);
|
||||
resources.builders.extend(more.builders);
|
||||
resources.builds.extend(more.builds);
|
||||
resources.repos.extend(more.repos);
|
||||
resources.alerters.extend(more.alerters);
|
||||
resources.procedures.extend(more.procedures);
|
||||
resources.builders.extend(more.builders);
|
||||
resources.alerters.extend(more.alerters);
|
||||
resources.server_templates.extend(more.server_templates);
|
||||
resources.resource_syncs.extend(more.resource_syncs);
|
||||
resources.user_groups.extend(more.user_groups);
|
||||
resources.variables.extend(more.variables);
|
||||
Ok(())
|
||||
} else if res.is_dir() {
|
||||
let directory = fs::read_dir(path)
|
||||
@@ -56,3 +70,11 @@ fn read_resources_recursive(
|
||||
Err(anyhow!("resources path is neither file nor directory"))
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_toml_file<T: DeserializeOwned>(
|
||||
path: impl AsRef<std::path::Path>,
|
||||
) -> anyhow::Result<T> {
|
||||
let contents = std::fs::read_to_string(path)
|
||||
.context("failed to read file contents")?;
|
||||
toml::from_str(&contents).context("failed to parse toml contents")
|
||||
}
|
||||
@@ -1,92 +1,174 @@
|
||||
use std::path::Path;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
self, alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
};
|
||||
|
||||
use crate::{sync::resources::ResourceSync, wait_for_enter};
|
||||
use crate::{helpers::wait_for_enter, state::cli_args};
|
||||
|
||||
mod resource_file;
|
||||
mod file;
|
||||
mod resource;
|
||||
mod resources;
|
||||
mod user_group;
|
||||
mod variables;
|
||||
|
||||
pub async fn run_sync(path: &Path) -> anyhow::Result<()> {
|
||||
info!("path: {path:?}");
|
||||
use resource::ResourceSync;
|
||||
|
||||
let resources = resource_file::read_resources(path)?;
|
||||
pub async fn run(path: &str, delete: bool) -> anyhow::Result<()> {
|
||||
info!("resources path: {}", path.blue().bold());
|
||||
if delete {
|
||||
warn!("Delete mode {}", "enabled".bold());
|
||||
}
|
||||
|
||||
let resources = file::read_resources(path)?;
|
||||
|
||||
info!("computing sync actions...");
|
||||
|
||||
let (server_template_creates, server_template_updates) =
|
||||
ServerTemplate::get_updates(resources.server_templates).await?;
|
||||
let (server_creates, server_updates) =
|
||||
Server::get_updates(resources.servers).await?;
|
||||
let (deployment_creates, deployment_updates) =
|
||||
Deployment::get_updates(resources.deployments).await?;
|
||||
let (build_creates, build_updates) =
|
||||
Build::get_updates(resources.builds).await?;
|
||||
let (builder_creates, builder_updates) =
|
||||
Builder::get_updates(resources.builders).await?;
|
||||
let (alerter_creates, alerter_updates) =
|
||||
Alerter::get_updates(resources.alerters).await?;
|
||||
let (repo_creates, repo_updates) =
|
||||
Repo::get_updates(resources.repos).await?;
|
||||
let (procedure_creates, procedure_updates) =
|
||||
Procedure::get_updates(resources.procedures).await?;
|
||||
let (user_group_creates, user_group_updates) =
|
||||
user_group::get_updates(resources.user_groups).await?;
|
||||
let (server_creates, server_updates, server_deletes) =
|
||||
resource::get_updates::<Server>(resources.servers, delete)?;
|
||||
let (deployment_creates, deployment_updates, deployment_deletes) =
|
||||
resource::get_updates::<Deployment>(
|
||||
resources.deployments,
|
||||
delete,
|
||||
)?;
|
||||
let (build_creates, build_updates, build_deletes) =
|
||||
resource::get_updates::<Build>(resources.builds, delete)?;
|
||||
let (repo_creates, repo_updates, repo_deletes) =
|
||||
resource::get_updates::<Repo>(resources.repos, delete)?;
|
||||
let (procedure_creates, procedure_updates, procedure_deletes) =
|
||||
resource::get_updates::<Procedure>(resources.procedures, delete)?;
|
||||
let (builder_creates, builder_updates, builder_deletes) =
|
||||
resource::get_updates::<Builder>(resources.builders, delete)?;
|
||||
let (alerter_creates, alerter_updates, alerter_deletes) =
|
||||
resource::get_updates::<Alerter>(resources.alerters, delete)?;
|
||||
let (
|
||||
server_template_creates,
|
||||
server_template_updates,
|
||||
server_template_deletes,
|
||||
) = resource::get_updates::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
)?;
|
||||
let (
|
||||
resource_sync_creates,
|
||||
resource_sync_updates,
|
||||
resource_sync_deletes,
|
||||
) = resource::get_updates::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
delete,
|
||||
)?;
|
||||
|
||||
if server_template_creates.is_empty()
|
||||
let (variable_creates, variable_updates, variable_deletes) =
|
||||
variables::get_updates(resources.variables, delete)?;
|
||||
|
||||
let (user_group_creates, user_group_updates, user_group_deletes) =
|
||||
user_group::get_updates(resources.user_groups, delete).await?;
|
||||
|
||||
if resource_sync_creates.is_empty()
|
||||
&& resource_sync_updates.is_empty()
|
||||
&& resource_sync_deletes.is_empty()
|
||||
&& server_template_creates.is_empty()
|
||||
&& server_template_updates.is_empty()
|
||||
&& server_template_deletes.is_empty()
|
||||
&& server_creates.is_empty()
|
||||
&& server_updates.is_empty()
|
||||
&& server_deletes.is_empty()
|
||||
&& deployment_creates.is_empty()
|
||||
&& deployment_updates.is_empty()
|
||||
&& deployment_deletes.is_empty()
|
||||
&& build_creates.is_empty()
|
||||
&& build_updates.is_empty()
|
||||
&& build_deletes.is_empty()
|
||||
&& builder_creates.is_empty()
|
||||
&& builder_updates.is_empty()
|
||||
&& builder_deletes.is_empty()
|
||||
&& alerter_creates.is_empty()
|
||||
&& alerter_updates.is_empty()
|
||||
&& alerter_deletes.is_empty()
|
||||
&& repo_creates.is_empty()
|
||||
&& repo_updates.is_empty()
|
||||
&& repo_deletes.is_empty()
|
||||
&& procedure_creates.is_empty()
|
||||
&& procedure_updates.is_empty()
|
||||
&& procedure_deletes.is_empty()
|
||||
&& user_group_creates.is_empty()
|
||||
&& user_group_updates.is_empty()
|
||||
&& user_group_deletes.is_empty()
|
||||
&& variable_creates.is_empty()
|
||||
&& variable_updates.is_empty()
|
||||
&& variable_deletes.is_empty()
|
||||
{
|
||||
info!("nothing to do. exiting.");
|
||||
info!("{}. exiting.", "nothing to do".green().bold());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
wait_for_enter("run sync")?;
|
||||
if !cli_args().yes {
|
||||
wait_for_enter("run sync")?;
|
||||
}
|
||||
|
||||
// No deps
|
||||
entities::sync::ResourceSync::run_updates(
|
||||
resource_sync_creates,
|
||||
resource_sync_updates,
|
||||
resource_sync_deletes,
|
||||
)
|
||||
.await;
|
||||
ServerTemplate::run_updates(
|
||||
server_template_creates,
|
||||
server_template_updates,
|
||||
server_template_deletes,
|
||||
)
|
||||
.await;
|
||||
Server::run_updates(server_creates, server_updates, server_deletes)
|
||||
.await;
|
||||
Alerter::run_updates(
|
||||
alerter_creates,
|
||||
alerter_updates,
|
||||
alerter_deletes,
|
||||
)
|
||||
.await;
|
||||
Server::run_updates(server_creates, server_updates).await;
|
||||
Alerter::run_updates(alerter_creates, alerter_updates).await;
|
||||
|
||||
// Dependant on server
|
||||
Builder::run_updates(builder_creates, builder_updates).await;
|
||||
Repo::run_updates(repo_creates, repo_updates).await;
|
||||
Builder::run_updates(
|
||||
builder_creates,
|
||||
builder_updates,
|
||||
builder_deletes,
|
||||
)
|
||||
.await;
|
||||
Repo::run_updates(repo_creates, repo_updates, repo_deletes).await;
|
||||
|
||||
// Dependant on builder
|
||||
Build::run_updates(build_creates, build_updates).await;
|
||||
|
||||
// Dependant on server / builder
|
||||
Deployment::run_updates(deployment_creates, deployment_updates)
|
||||
Build::run_updates(build_creates, build_updates, build_deletes)
|
||||
.await;
|
||||
|
||||
// Dependant on server / build
|
||||
Deployment::run_updates(
|
||||
deployment_creates,
|
||||
deployment_updates,
|
||||
deployment_deletes,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Dependant on everything
|
||||
Procedure::run_updates(procedure_creates, procedure_updates).await;
|
||||
user_group::run_updates(user_group_creates, user_group_updates)
|
||||
.await;
|
||||
Procedure::run_updates(
|
||||
procedure_creates,
|
||||
procedure_updates,
|
||||
procedure_deletes,
|
||||
)
|
||||
.await;
|
||||
variables::run_updates(
|
||||
variable_creates,
|
||||
variable_updates,
|
||||
variable_deletes,
|
||||
)
|
||||
.await;
|
||||
user_group::run_updates(
|
||||
user_group_creates,
|
||||
user_group_updates,
|
||||
user_group_deletes,
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
357
bin/cli/src/sync/resource.rs
Normal file
357
bin/cli/src/sync/resource.rs
Normal file
@@ -0,0 +1,357 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::write::{UpdateDescription, UpdateTagsOnResource},
|
||||
entities::{
|
||||
resource::Resource, toml::ResourceToml, update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::maps::id_to_tag;
|
||||
|
||||
pub type ToUpdate<T> = Vec<ToUpdateItem<T>>;
|
||||
pub type ToCreate<T> = Vec<ResourceToml<T>>;
|
||||
/// Vec of resource names
|
||||
pub type ToDelete = Vec<String>;
|
||||
|
||||
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>, ToDelete);
|
||||
|
||||
pub struct ToUpdateItem<T> {
|
||||
pub id: String,
|
||||
pub resource: ResourceToml<T>,
|
||||
pub update_description: bool,
|
||||
pub update_tags: bool,
|
||||
}
|
||||
|
||||
pub trait ResourceSync: Sized {
|
||||
type Config: Clone
|
||||
+ Default
|
||||
+ Send
|
||||
+ From<Self::PartialConfig>
|
||||
+ PartialDiff<Self::PartialConfig, Self::ConfigDiff>
|
||||
+ 'static;
|
||||
type Info: Default + 'static;
|
||||
type PartialConfig: std::fmt::Debug
|
||||
+ Clone
|
||||
+ Send
|
||||
+ From<Self::Config>
|
||||
+ From<Self::ConfigDiff>
|
||||
+ Serialize
|
||||
+ MaybeNone
|
||||
+ 'static;
|
||||
type ConfigDiff: Diff + MaybeNone;
|
||||
|
||||
fn display() -> &'static str;
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget;
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>;
|
||||
|
||||
/// Creates the resource and returns created id.
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String>;
|
||||
|
||||
/// Updates the resource at id with the partial config.
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()>;
|
||||
|
||||
/// Apply any changes to incoming toml partial config
|
||||
/// before it is diffed against existing config
|
||||
fn validate_partial_config(_config: &mut Self::PartialConfig) {}
|
||||
|
||||
/// Diffs the declared toml (partial) against the full existing config.
|
||||
/// Removes all fields from toml (partial) that haven't changed.
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff>;
|
||||
|
||||
/// Apply any changes to computed config diff
|
||||
/// before logging
|
||||
fn validate_diff(_diff: &mut Self::ConfigDiff) {}
|
||||
|
||||
/// Deletes the target resource
|
||||
async fn delete(id_or_name: String) -> anyhow::Result<()>;
|
||||
|
||||
async fn run_updates(
|
||||
to_create: ToCreate<Self::PartialConfig>,
|
||||
to_update: ToUpdate<Self::PartialConfig>,
|
||||
to_delete: ToDelete,
|
||||
) {
|
||||
for resource in to_create {
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
let id = match Self::create(resource).await {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"failed to create {} {name} | {e:#}",
|
||||
Self::display(),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
run_update_tags::<Self>(id.clone(), &name, tags).await;
|
||||
run_update_description::<Self>(id, &name, description).await;
|
||||
info!(
|
||||
"{} {} '{}'",
|
||||
"created".green().bold(),
|
||||
Self::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
|
||||
for ToUpdateItem {
|
||||
id,
|
||||
resource,
|
||||
update_description,
|
||||
update_tags,
|
||||
} in to_update
|
||||
{
|
||||
// Update resource
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
|
||||
if update_description {
|
||||
run_update_description::<Self>(
|
||||
id.clone(),
|
||||
&name,
|
||||
description,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
if update_tags {
|
||||
run_update_tags::<Self>(id.clone(), &name, tags).await;
|
||||
}
|
||||
|
||||
if !resource.config.is_none() {
|
||||
if let Err(e) = Self::update(id, resource).await {
|
||||
warn!(
|
||||
"failed to update config on {} {name} | {e:#}",
|
||||
Self::display()
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} {} '{}' configuration",
|
||||
"updated".blue().bold(),
|
||||
Self::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for resource in to_delete {
|
||||
if let Err(e) = Self::delete(resource.clone()).await {
|
||||
warn!(
|
||||
"failed to delete {} {resource} | {e:#}",
|
||||
Self::display()
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} {} '{}'",
|
||||
"deleted".red().bold(),
|
||||
Self::display(),
|
||||
resource.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets all the resources to update, logging along the way.
|
||||
pub fn get_updates<Resource: ResourceSync>(
|
||||
resources: Vec<ResourceToml<Resource::PartialConfig>>,
|
||||
delete: bool,
|
||||
) -> anyhow::Result<UpdatesResult<Resource::PartialConfig>> {
|
||||
let map = Resource::name_to_resource();
|
||||
|
||||
let mut to_create = ToCreate::<Resource::PartialConfig>::new();
|
||||
let mut to_update = ToUpdate::<Resource::PartialConfig>::new();
|
||||
let mut to_delete = ToDelete::new();
|
||||
|
||||
if delete {
|
||||
for resource in map.values() {
|
||||
if !resources.iter().any(|r| r.name == resource.name) {
|
||||
to_delete.push(resource.name.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for mut resource in resources {
|
||||
match map.get(&resource.name) {
|
||||
Some(original) => {
|
||||
// First merge toml resource config (partial) onto default resource config.
|
||||
// Makes sure things that aren't defined in toml (come through as None) actually get removed.
|
||||
let config: Resource::Config = resource.config.into();
|
||||
resource.config = config.into();
|
||||
|
||||
Resource::validate_partial_config(&mut resource.config);
|
||||
|
||||
let mut diff = Resource::get_diff(
|
||||
original.config.clone(),
|
||||
resource.config,
|
||||
)?;
|
||||
|
||||
Resource::validate_diff(&mut diff);
|
||||
|
||||
let original_tags = original
|
||||
.tags
|
||||
.iter()
|
||||
.filter_map(|id| {
|
||||
id_to_tag().get(id).map(|t| t.name.clone())
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Only proceed if there are any fields to update,
|
||||
// or a change to tags / description
|
||||
if diff.is_none()
|
||||
&& resource.description == original.description
|
||||
&& resource.tags == original_tags
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
println!(
|
||||
"\n{}: {}: '{}'\n-------------------",
|
||||
"UPDATE".blue(),
|
||||
Resource::display(),
|
||||
resource.name.bold(),
|
||||
);
|
||||
let mut lines = Vec::<String>::new();
|
||||
if resource.description != original.description {
|
||||
lines.push(format!(
|
||||
"{}: 'description'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
original.description.red(),
|
||||
"to".dimmed(),
|
||||
resource.description.green()
|
||||
))
|
||||
}
|
||||
if resource.tags != original_tags {
|
||||
let from = format!("{:?}", original_tags).red();
|
||||
let to = format!("{:?}", resource.tags).green();
|
||||
lines.push(format!(
|
||||
"{}: 'tags'\n{}: {from}\n{}: {to}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
"to".dimmed(),
|
||||
));
|
||||
}
|
||||
lines.extend(diff.iter_field_diffs().map(
|
||||
|FieldDiff { field, from, to }| {
|
||||
format!(
|
||||
"{}: '{field}'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
from.red(),
|
||||
"to".dimmed(),
|
||||
to.green()
|
||||
)
|
||||
},
|
||||
));
|
||||
println!("{}", lines.join("\n-------------------\n"));
|
||||
|
||||
// Minimizes updates through diffing.
|
||||
resource.config = diff.into();
|
||||
|
||||
let update = ToUpdateItem {
|
||||
id: original.id.clone(),
|
||||
update_description: resource.description
|
||||
!= original.description,
|
||||
update_tags: resource.tags != original_tags,
|
||||
resource,
|
||||
};
|
||||
|
||||
to_update.push(update);
|
||||
}
|
||||
None => {
|
||||
println!(
|
||||
"\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
|
||||
"CREATE".green(),
|
||||
Resource::display(),
|
||||
resource.name.bold().green(),
|
||||
"description".dimmed(),
|
||||
resource.description,
|
||||
"tags".dimmed(),
|
||||
resource.tags,
|
||||
"config".dimmed(),
|
||||
serde_json::to_string_pretty(&resource.config)?
|
||||
);
|
||||
to_create.push(resource);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name in &to_delete {
|
||||
println!(
|
||||
"\n{}: {}: '{}'\n-------------------",
|
||||
"DELETE".red(),
|
||||
Resource::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok((to_create, to_update, to_delete))
|
||||
}
|
||||
|
||||
pub async fn run_update_tags<Resource: ResourceSync>(
|
||||
id: String,
|
||||
name: &str,
|
||||
tags: Vec<String>,
|
||||
) {
|
||||
// Update tags
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(UpdateTagsOnResource {
|
||||
target: Resource::resource_target(id),
|
||||
tags,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update tags on {} {name} | {e:#}",
|
||||
Resource::display(),
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} {} '{}' tags",
|
||||
"updated".blue().bold(),
|
||||
Resource::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_update_description<Resource: ResourceSync>(
|
||||
id: String,
|
||||
name: &str,
|
||||
description: String,
|
||||
) {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(UpdateDescription {
|
||||
target: Resource::resource_target(id.clone()),
|
||||
description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to update resource {id} description | {e:#}");
|
||||
} else {
|
||||
info!(
|
||||
"{} {} '{}' description",
|
||||
"updated".blue().bold(),
|
||||
Resource::display(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,31 +1,28 @@
|
||||
use partial_derive2::PartialDiff;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetAlerter,
|
||||
write::{CreateAlerter, UpdateAlerter},
|
||||
},
|
||||
api::write::{CreateAlerter, DeleteAlerter, UpdateAlerter},
|
||||
entities::{
|
||||
alerter::{
|
||||
Alerter, AlerterConfig, AlerterConfigDiff, AlerterInfo, AlerterListItemInfo, PartialAlerterConfig
|
||||
Alerter, AlerterConfig, AlerterConfigDiff, PartialAlerterConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{maps::name_to_alerter, monitor_client};
|
||||
|
||||
use super::ResourceSync;
|
||||
use crate::{
|
||||
maps::name_to_alerter, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
impl ResourceSync for Alerter {
|
||||
type Config = AlerterConfig;
|
||||
type Info = AlerterInfo;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialAlerterConfig;
|
||||
type ConfigDiff = AlerterConfigDiff;
|
||||
type ListItemInfo = AlerterListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"alerter"
|
||||
@@ -36,7 +33,7 @@ impl ResourceSync for Alerter {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_alerter()
|
||||
}
|
||||
@@ -66,16 +63,15 @@ impl ResourceSync for Alerter {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetAlerter { alerter: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteAlerter { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetBuild,
|
||||
write::{CreateBuild, UpdateBuild},
|
||||
},
|
||||
api::write::{CreateBuild, DeleteBuild, UpdateBuild},
|
||||
entities::{
|
||||
build::{
|
||||
Build, BuildConfig, BuildConfigDiff, BuildInfo,
|
||||
BuildListItemInfo, PartialBuildConfig,
|
||||
PartialBuildConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
@@ -19,17 +16,15 @@ use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_builder, name_to_build},
|
||||
monitor_client,
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for Build {
|
||||
type Config = BuildConfig;
|
||||
type Info = BuildInfo;
|
||||
type PartialConfig = PartialBuildConfig;
|
||||
type ConfigDiff = BuildConfigDiff;
|
||||
type ListItemInfo = BuildListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"build"
|
||||
@@ -40,7 +35,7 @@ impl ResourceSync for Build {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_build()
|
||||
}
|
||||
@@ -70,13 +65,7 @@ impl ResourceSync for Build {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetBuild { build: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
@@ -88,4 +77,17 @@ impl ResourceSync for Build {
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
fn validate_diff(diff: &mut Self::ConfigDiff) {
|
||||
if let Some((_, to)) = &diff.version {
|
||||
if to.is_none() {
|
||||
diff.version = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteBuild { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,12 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetBuilder,
|
||||
write::{CreateBuilder, UpdateBuilder},
|
||||
},
|
||||
api::write::{CreateBuilder, DeleteBuilder, UpdateBuilder},
|
||||
entities::{
|
||||
builder::{
|
||||
Builder, BuilderConfig, BuilderConfigDiff, BuilderListItemInfo,
|
||||
PartialBuilderConfig,
|
||||
Builder, BuilderConfig, BuilderConfigDiff, PartialBuilderConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
@@ -19,17 +15,15 @@ use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_server, name_to_builder},
|
||||
monitor_client,
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for Builder {
|
||||
type Config = BuilderConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialBuilderConfig;
|
||||
type ConfigDiff = BuilderConfigDiff;
|
||||
type ListItemInfo = BuilderListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"builder"
|
||||
@@ -40,7 +34,7 @@ impl ResourceSync for Builder {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_builder()
|
||||
}
|
||||
@@ -70,13 +64,7 @@ impl ResourceSync for Builder {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetBuilder { builder: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
@@ -90,4 +78,9 @@ impl ResourceSync for Builder {
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteBuilder { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{read::GetDeployment, write},
|
||||
api::write::{self, DeleteDeployment},
|
||||
entities::{
|
||||
deployment::{
|
||||
Deployment, DeploymentConfig, DeploymentConfigDiff,
|
||||
DeploymentImage, DeploymentListItemInfo,
|
||||
PartialDeploymentConfig,
|
||||
DeploymentImage, PartialDeploymentConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
@@ -17,17 +16,15 @@ use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_build, id_to_server, name_to_deployment},
|
||||
monitor_client,
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for Deployment {
|
||||
type Config = DeploymentConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialDeploymentConfig;
|
||||
type ConfigDiff = DeploymentConfigDiff;
|
||||
type ListItemInfo = DeploymentListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"deployment"
|
||||
@@ -38,7 +35,7 @@ impl ResourceSync for Deployment {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_deployment()
|
||||
}
|
||||
@@ -68,15 +65,7 @@ impl ResourceSync for Deployment {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client()
|
||||
.read(GetDeployment { deployment: id })
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
@@ -101,4 +90,9 @@ impl ResourceSync for Deployment {
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteDeployment { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,318 +1,9 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::write::{UpdateDescription, UpdateTagsOnResource},
|
||||
entities::{
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{cli_args, maps::id_to_tag, monitor_client};
|
||||
|
||||
pub mod alerter;
|
||||
pub mod build;
|
||||
pub mod builder;
|
||||
pub mod deployment;
|
||||
pub mod procedure;
|
||||
pub mod repo;
|
||||
pub mod server;
|
||||
pub mod server_template;
|
||||
|
||||
type ToUpdate<T> = Vec<ToUpdateItem<T>>;
|
||||
type ToCreate<T> = Vec<ResourceToml<T>>;
|
||||
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>);
|
||||
|
||||
pub struct ToUpdateItem<T> {
|
||||
pub id: String,
|
||||
pub resource: ResourceToml<T>,
|
||||
pub update_description: bool,
|
||||
pub update_tags: bool,
|
||||
}
|
||||
|
||||
pub trait ResourceSync {
|
||||
type Config: Clone
|
||||
+ Send
|
||||
+ PartialDiff<Self::PartialConfig, Self::ConfigDiff>
|
||||
+ 'static;
|
||||
type Info: Default;
|
||||
type PartialConfig: std::fmt::Debug
|
||||
+ Clone
|
||||
+ Send
|
||||
+ From<Self::ConfigDiff>
|
||||
+ Serialize
|
||||
+ 'static;
|
||||
type ConfigDiff: Diff + MaybeNone;
|
||||
type ListItemInfo: 'static;
|
||||
|
||||
fn display() -> &'static str;
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget;
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>;
|
||||
|
||||
/// Creates the resource and returns created id.
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String>;
|
||||
|
||||
/// Updates the resource at id with the partial config.
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()>;
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>>;
|
||||
|
||||
/// Diffs the declared toml (partial) against the full existing config.
|
||||
/// Removes all fields from toml (partial) that haven't changed.
|
||||
async fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff>;
|
||||
|
||||
async fn get_updates(
|
||||
resources: Vec<ResourceToml<Self::PartialConfig>>,
|
||||
) -> anyhow::Result<UpdatesResult<Self::PartialConfig>> {
|
||||
let map = Self::name_to_resource();
|
||||
|
||||
let mut to_create = ToCreate::<Self::PartialConfig>::new();
|
||||
let mut to_update = ToUpdate::<Self::PartialConfig>::new();
|
||||
|
||||
let quiet = cli_args().quiet;
|
||||
|
||||
for mut resource in resources {
|
||||
match map.get(&resource.name).map(|s| s.id.clone()) {
|
||||
Some(id) => {
|
||||
// Get the full original config for the resource.
|
||||
let original = Self::get(id.clone()).await?;
|
||||
|
||||
let diff =
|
||||
Self::get_diff(original.config, resource.config).await?;
|
||||
|
||||
let original_tags = original
|
||||
.tags
|
||||
.iter()
|
||||
.filter_map(|id| {
|
||||
id_to_tag().get(id).map(|t| t.name.clone())
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Only proceed if there are any fields to update,
|
||||
// or a change to tags / description
|
||||
if diff.is_none()
|
||||
&& resource.description == original.description
|
||||
&& resource.tags == original_tags
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if !quiet {
|
||||
println!(
|
||||
"\n{}: {}: '{}'\n-------------------",
|
||||
"UPDATE".blue(),
|
||||
Self::display(),
|
||||
resource.name.bold(),
|
||||
);
|
||||
let mut lines = Vec::<String>::new();
|
||||
if resource.description != original.description {
|
||||
lines.push(format!(
|
||||
"{}: 'description'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
original.description.red(),
|
||||
"to".dimmed(),
|
||||
resource.description.green()
|
||||
))
|
||||
}
|
||||
if resource.tags != original_tags {
|
||||
let from = format!("{:?}", original_tags).red();
|
||||
let to = format!("{:?}", resource.tags).green();
|
||||
lines.push(format!(
|
||||
"{}: 'tags'\n{}: {from}\n{}: {to}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
"to".dimmed(),
|
||||
));
|
||||
}
|
||||
lines.extend(diff.iter_field_diffs().map(
|
||||
|FieldDiff { field, from, to }| {
|
||||
format!(
|
||||
"{}: '{field}'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
from.red(),
|
||||
"to".dimmed(),
|
||||
to.green()
|
||||
)
|
||||
},
|
||||
));
|
||||
println!("{}", lines.join("\n-------------------\n"));
|
||||
}
|
||||
|
||||
// Minimizes updates through diffing.
|
||||
resource.config = diff.into();
|
||||
|
||||
let update = ToUpdateItem {
|
||||
id,
|
||||
update_description: resource.description
|
||||
!= original.description,
|
||||
update_tags: resource.tags != original_tags,
|
||||
resource,
|
||||
};
|
||||
|
||||
to_update.push(update);
|
||||
}
|
||||
None => {
|
||||
if !quiet {
|
||||
println!(
|
||||
"\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
|
||||
"CREATE".green(),
|
||||
Self::display(),
|
||||
resource.name.bold().green(),
|
||||
"description".dimmed(),
|
||||
resource.description,
|
||||
"tags".dimmed(),
|
||||
resource.tags,
|
||||
"config".dimmed(),
|
||||
serde_json::to_string_pretty(&resource.config)?
|
||||
)
|
||||
}
|
||||
to_create.push(resource);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if quiet && !to_create.is_empty() {
|
||||
println!(
|
||||
"\n{}s {}: {:#?}",
|
||||
Self::display(),
|
||||
"TO CREATE".green(),
|
||||
to_create.iter().map(|item| item.name.as_str())
|
||||
);
|
||||
}
|
||||
|
||||
if quiet && !to_update.is_empty() {
|
||||
println!(
|
||||
"\n{}s {}: {:#?}",
|
||||
Self::display(),
|
||||
"TO UPDATE".blue(),
|
||||
to_update
|
||||
.iter()
|
||||
.map(|update| update.resource.name.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
}
|
||||
|
||||
Ok((to_create, to_update))
|
||||
}
|
||||
|
||||
async fn run_updates(
|
||||
to_create: ToCreate<Self::PartialConfig>,
|
||||
to_update: ToUpdate<Self::PartialConfig>,
|
||||
) {
|
||||
let log_after = !to_update.is_empty() || !to_create.is_empty();
|
||||
|
||||
for resource in to_create {
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
let id = match Self::create(resource).await {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"failed to create {} {name} | {e:#}",
|
||||
Self::display(),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
Self::update_tags(id.clone(), &name, tags).await;
|
||||
Self::update_description(id, &name, description).await;
|
||||
info!("{} {name} created", Self::display());
|
||||
}
|
||||
|
||||
for ToUpdateItem {
|
||||
id,
|
||||
resource,
|
||||
update_description,
|
||||
update_tags,
|
||||
} in to_update
|
||||
{
|
||||
// Update resource
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
|
||||
if update_description {
|
||||
Self::update_description(id.clone(), &name, description)
|
||||
.await;
|
||||
}
|
||||
|
||||
if update_tags {
|
||||
Self::update_tags(id.clone(), &name, tags).await;
|
||||
}
|
||||
|
||||
if let Err(e) = Self::update(id, resource).await {
|
||||
warn!(
|
||||
"failed to update config on {} {name} | {e:#}",
|
||||
Self::display()
|
||||
);
|
||||
} else {
|
||||
info!("updated {} {name} config", Self::display());
|
||||
}
|
||||
|
||||
info!("{} {name} updated", Self::display());
|
||||
}
|
||||
|
||||
if log_after {
|
||||
info!(
|
||||
"============ {}s synced ✅ ============",
|
||||
Self::display()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_tags(id: String, name: &str, tags: Vec<String>) {
|
||||
// Update tags
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdateTagsOnResource {
|
||||
target: Self::resource_target(id),
|
||||
tags,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update tags on {} {name} | {e:#}",
|
||||
Self::display(),
|
||||
);
|
||||
} else {
|
||||
info!("updated {} {name} tags", Self::display());
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_description(
|
||||
id: String,
|
||||
name: &str,
|
||||
description: String,
|
||||
) {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdateDescription {
|
||||
target: Self::resource_target(id.clone()),
|
||||
description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to update resource {id} description | {e:#}");
|
||||
} else {
|
||||
info!("updated {} {name} description", Self::display());
|
||||
}
|
||||
}
|
||||
}
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod sync;
|
||||
|
||||
@@ -1,16 +1,17 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
execute::Execution,
|
||||
read::GetProcedure,
|
||||
write::{CreateProcedure, UpdateProcedure},
|
||||
write::{CreateProcedure, DeleteProcedure, UpdateProcedure},
|
||||
},
|
||||
entities::{
|
||||
procedure::{
|
||||
PartialProcedureConfig, Procedure, ProcedureConfig, ProcedureConfigDiff, ProcedureListItemInfo
|
||||
PartialProcedureConfig, Procedure, ProcedureConfig,
|
||||
ProcedureConfigDiff,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
@@ -20,20 +21,20 @@ use partial_derive2::{MaybeNone, PartialDiff};
|
||||
use crate::{
|
||||
maps::{
|
||||
id_to_build, id_to_deployment, id_to_procedure, id_to_repo,
|
||||
id_to_server, name_to_procedure,
|
||||
id_to_resource_sync, id_to_server, name_to_procedure,
|
||||
},
|
||||
state::monitor_client,
|
||||
sync::resource::{
|
||||
run_update_description, run_update_tags, ResourceSync, ToCreate,
|
||||
ToDelete, ToUpdate, ToUpdateItem,
|
||||
},
|
||||
monitor_client,
|
||||
sync::resources::ToUpdateItem,
|
||||
};
|
||||
|
||||
use super::{ResourceSync, ToCreate, ToUpdate};
|
||||
|
||||
impl ResourceSync for Procedure {
|
||||
type Config = ProcedureConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialProcedureConfig;
|
||||
type ConfigDiff = ProcedureConfigDiff;
|
||||
type ListItemInfo = ProcedureListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"procedure"
|
||||
@@ -44,7 +45,7 @@ impl ResourceSync for Procedure {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_procedure()
|
||||
}
|
||||
@@ -77,7 +78,23 @@ impl ResourceSync for Procedure {
|
||||
async fn run_updates(
|
||||
mut to_create: ToCreate<Self::PartialConfig>,
|
||||
mut to_update: ToUpdate<Self::PartialConfig>,
|
||||
to_delete: ToDelete,
|
||||
) {
|
||||
for name in to_delete {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(DeleteProcedure { id: name.clone() })
|
||||
.await
|
||||
{
|
||||
warn!("failed to delete procedure {name} | {e:#}",);
|
||||
} else {
|
||||
info!(
|
||||
"{} procedure '{}'",
|
||||
"deleted".red().bold(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if to_update.is_empty() && to_create.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -96,11 +113,15 @@ impl ResourceSync for Procedure {
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
if *update_description {
|
||||
Self::update_description(id.clone(), &name, description)
|
||||
.await;
|
||||
run_update_description::<Procedure>(
|
||||
id.clone(),
|
||||
&name,
|
||||
description,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
if *update_tags {
|
||||
Self::update_tags(id.clone(), &name, tags).await;
|
||||
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
|
||||
}
|
||||
if !resource.config.is_none() {
|
||||
if let Err(e) =
|
||||
@@ -112,6 +133,7 @@ impl ResourceSync for Procedure {
|
||||
Self::display()
|
||||
);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,112 +161,115 @@ impl ResourceSync for Procedure {
|
||||
continue;
|
||||
}
|
||||
};
|
||||
Self::update_tags(id.clone(), &name, tags).await;
|
||||
Self::update_description(id, &name, description).await;
|
||||
run_update_tags::<Procedure>(id.clone(), &name, tags).await;
|
||||
run_update_description::<Procedure>(id, &name, description)
|
||||
.await;
|
||||
info!("{} {name} created", Self::display());
|
||||
to_pull.push(name);
|
||||
}
|
||||
to_create.retain(|resource| !to_pull.contains(&resource.name));
|
||||
|
||||
if to_update.is_empty() && to_create.is_empty() {
|
||||
info!(
|
||||
"============ {}s synced ✅ ============",
|
||||
Self::display()
|
||||
);
|
||||
// info!("all procedures synced");
|
||||
return;
|
||||
}
|
||||
}
|
||||
warn!("procedure sync loop exited after max iterations");
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetProcedure { procedure: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
for execution in &mut original.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::None(_) => {}
|
||||
Execution::RunProcedure(config) => {
|
||||
config.procedure = id_to_procedure()
|
||||
.get(&config.procedure)
|
||||
.map(|p| p.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunBuild(config) => {
|
||||
config.build = id_to_build()
|
||||
.get(&config.build)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::Deploy(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StartContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RemoveContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::CloneRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PullRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopAllContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneDockerNetworks(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneDockerImages(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneDockerContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
for stage in &mut original.stages {
|
||||
for execution in &mut stage.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::None(_) | Execution::Sleep(_) => {}
|
||||
Execution::RunProcedure(config) => {
|
||||
config.procedure = id_to_procedure()
|
||||
.get(&config.procedure)
|
||||
.map(|p| p.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunBuild(config) => {
|
||||
config.build = id_to_build()
|
||||
.get(&config.build)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::Deploy(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StartContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RemoveContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::CloneRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PullRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopAllContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneNetworks(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneImages(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunSync(config) => {
|
||||
config.sync = id_to_resource_sync()
|
||||
.get(&config.sync)
|
||||
.map(|s| s.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(_: String) -> anyhow::Result<()> {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,12 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetRepo,
|
||||
write::{CreateRepo, UpdateRepo},
|
||||
},
|
||||
api::write::{CreateRepo, DeleteRepo, UpdateRepo},
|
||||
entities::{
|
||||
repo::{
|
||||
PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo,
|
||||
RepoListItemInfo,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
@@ -19,17 +15,15 @@ use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_server, name_to_repo},
|
||||
monitor_client,
|
||||
state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for Repo {
|
||||
type Config = RepoConfig;
|
||||
type Info = RepoInfo;
|
||||
type PartialConfig = PartialRepoConfig;
|
||||
type ConfigDiff = RepoConfigDiff;
|
||||
type ListItemInfo = RepoListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"repo"
|
||||
@@ -40,7 +34,7 @@ impl ResourceSync for Repo {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_repo()
|
||||
}
|
||||
@@ -70,13 +64,7 @@ impl ResourceSync for Repo {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetRepo { repo: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
@@ -88,4 +76,9 @@ impl ResourceSync for Repo {
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteRepo { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +1,11 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetServer,
|
||||
write::{CreateServer, UpdateServer},
|
||||
},
|
||||
api::write::{CreateServer, DeleteServer, UpdateServer},
|
||||
entities::{
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
server::{
|
||||
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
|
||||
ServerListItemInfo,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
@@ -17,16 +13,16 @@ use monitor_client::{
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{maps::name_to_server, monitor_client};
|
||||
|
||||
use super::ResourceSync;
|
||||
use crate::{
|
||||
maps::name_to_server, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
impl ResourceSync for Server {
|
||||
type Config = ServerConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialServerConfig;
|
||||
type ConfigDiff = ServerConfigDiff;
|
||||
type ListItemInfo = ServerListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"server"
|
||||
@@ -37,7 +33,7 @@ impl ResourceSync for Server {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_server()
|
||||
}
|
||||
@@ -67,16 +63,15 @@ impl ResourceSync for Server {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetServer { server: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteServer { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetServerTemplate,
|
||||
write::{CreateServerTemplate, UpdateServerTemplate},
|
||||
api::write::{
|
||||
CreateServerTemplate, DeleteServerTemplate, UpdateServerTemplate,
|
||||
},
|
||||
entities::{
|
||||
resource::{Resource, ResourceListItem},
|
||||
resource::Resource,
|
||||
server_template::{
|
||||
PartialServerTemplateConfig, ServerTemplate, ServerTemplateConfig, ServerTemplateConfigDiff, ServerTemplateListItemInfo
|
||||
PartialServerTemplateConfig, ServerTemplate,
|
||||
ServerTemplateConfig, ServerTemplateConfigDiff,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
@@ -16,16 +16,16 @@ use monitor_client::{
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{maps::name_to_server_template, monitor_client};
|
||||
|
||||
use super::ResourceSync;
|
||||
use crate::{
|
||||
maps::name_to_server_template, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
impl ResourceSync for ServerTemplate {
|
||||
type Config = ServerTemplateConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialServerTemplateConfig;
|
||||
type ConfigDiff = ServerTemplateConfigDiff;
|
||||
type ListItemInfo = ServerTemplateListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"server template"
|
||||
@@ -36,7 +36,7 @@ impl ResourceSync for ServerTemplate {
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_server_template()
|
||||
}
|
||||
@@ -66,20 +66,15 @@ impl ResourceSync for ServerTemplate {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client()
|
||||
.read(GetServerTemplate {
|
||||
server_template: id,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteServerTemplate { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
81
bin/cli/src/sync/resources/sync.rs
Normal file
81
bin/cli/src/sync/resources/sync.rs
Normal file
@@ -0,0 +1,81 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateResourceSync, DeleteResourceSync, UpdateResourceSync,
|
||||
},
|
||||
entities::{
|
||||
self,
|
||||
resource::Resource,
|
||||
sync::{
|
||||
PartialResourceSyncConfig, ResourceSyncConfig,
|
||||
ResourceSyncConfigDiff, ResourceSyncInfo,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::name_to_resource_sync, state::monitor_client,
|
||||
sync::resource::ResourceSync,
|
||||
};
|
||||
|
||||
impl ResourceSync for entities::sync::ResourceSync {
|
||||
type Config = ResourceSyncConfig;
|
||||
type Info = ResourceSyncInfo;
|
||||
type PartialConfig = PartialResourceSyncConfig;
|
||||
type ConfigDiff = ResourceSyncConfigDiff;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"resource sync"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::ResourceSync(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
name_to_resource_sync()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateResourceSync {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|res| res.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateResourceSync {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
|
||||
async fn delete(id: String) -> anyhow::Result<()> {
|
||||
monitor_client().write(DeleteResourceSync { id }).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,11 +1,13 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::ListUserTargetPermissions,
|
||||
write::{
|
||||
CreateUserGroup, SetUsersInUserGroup, UpdatePermissionOnTarget,
|
||||
CreateUserGroup, DeleteUserGroup, SetUsersInUserGroup,
|
||||
UpdatePermissionOnTarget,
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
@@ -15,143 +17,247 @@ use monitor_client::{
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
maps::{
|
||||
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
|
||||
id_to_procedure, id_to_repo, id_to_server, id_to_server_template,
|
||||
id_to_user, name_to_user_group,
|
||||
},
|
||||
monitor_client,
|
||||
use crate::maps::{
|
||||
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
|
||||
id_to_procedure, id_to_repo, id_to_resource_sync, id_to_server,
|
||||
id_to_server_template, id_to_user, name_to_user_group,
|
||||
};
|
||||
|
||||
pub struct UpdateItem {
|
||||
user_group: UserGroupToml,
|
||||
update_users: bool,
|
||||
update_permissions: bool,
|
||||
}
|
||||
|
||||
pub struct DeleteItem {
|
||||
id: String,
|
||||
name: String,
|
||||
}
|
||||
|
||||
pub async fn get_updates(
|
||||
user_groups: Vec<UserGroupToml>,
|
||||
) -> anyhow::Result<(Vec<UserGroupToml>, Vec<UserGroupToml>)> {
|
||||
delete: bool,
|
||||
) -> anyhow::Result<(
|
||||
Vec<UserGroupToml>,
|
||||
Vec<UpdateItem>,
|
||||
Vec<DeleteItem>,
|
||||
)> {
|
||||
let map = name_to_user_group();
|
||||
|
||||
let mut to_create = Vec::<UserGroupToml>::new();
|
||||
let mut to_update = Vec::<UserGroupToml>::new();
|
||||
let mut to_update = Vec::<UpdateItem>::new();
|
||||
let mut to_delete = Vec::<DeleteItem>::new();
|
||||
|
||||
for mut user_group in user_groups {
|
||||
match map.get(&user_group.name).cloned() {
|
||||
Some(original) => {
|
||||
// replace the user ids with usernames
|
||||
let mut users = original
|
||||
.users
|
||||
.into_iter()
|
||||
.filter_map(|user_id| {
|
||||
id_to_user().get(&user_id).map(|u| u.username.clone())
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut permissions = monitor_client()
|
||||
.read(ListUserTargetPermissions {
|
||||
user_target: UserTarget::UserGroup(original.id),
|
||||
})
|
||||
.await
|
||||
.context("failed to query for UserGroup permissions")?
|
||||
.into_iter()
|
||||
.map(|mut p| {
|
||||
// replace the ids with names
|
||||
match &mut p.resource_target {
|
||||
ResourceTarget::System(_) => {}
|
||||
ResourceTarget::Build(id) => {
|
||||
*id = id_to_build()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
*id = id_to_builder()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
*id = id_to_deployment()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
*id = id_to_server()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
*id = id_to_repo()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
*id = id_to_alerter()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
*id = id_to_procedure()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
*id = id_to_server_template()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
PermissionToml {
|
||||
target: p.resource_target,
|
||||
level: p.level,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
users.sort();
|
||||
user_group.users.sort();
|
||||
|
||||
user_group.permissions.sort_by(sort_permissions);
|
||||
permissions.sort_by(sort_permissions);
|
||||
|
||||
// only push update after failed diff
|
||||
if user_group.users != users
|
||||
|| user_group.permissions != permissions
|
||||
{
|
||||
// no update from users
|
||||
to_update.push(user_group);
|
||||
}
|
||||
if delete {
|
||||
for user_group in map.values() {
|
||||
if !user_groups.iter().any(|ug| ug.name == user_group.name) {
|
||||
to_delete.push(DeleteItem {
|
||||
id: user_group.id.clone(),
|
||||
name: user_group.name.clone(),
|
||||
});
|
||||
}
|
||||
None => to_create.push(user_group),
|
||||
}
|
||||
}
|
||||
|
||||
if !to_create.is_empty() {
|
||||
let id_to_user = id_to_user();
|
||||
|
||||
for mut user_group in user_groups {
|
||||
let original = match map.get(&user_group.name).cloned() {
|
||||
Some(original) => original,
|
||||
None => {
|
||||
println!(
|
||||
"\n{}: user group: {}\n{}: {:?}\n{}: {:?}",
|
||||
"CREATE".green(),
|
||||
user_group.name.bold().green(),
|
||||
"users".dimmed(),
|
||||
user_group.users,
|
||||
"permissions".dimmed(),
|
||||
user_group.permissions,
|
||||
);
|
||||
to_create.push(user_group);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let mut original_users = original
|
||||
.users
|
||||
.into_iter()
|
||||
.filter_map(|user_id| {
|
||||
id_to_user.get(&user_id).map(|u| u.username.clone())
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut original_permissions = crate::state::monitor_client()
|
||||
.read(ListUserTargetPermissions {
|
||||
user_target: UserTarget::UserGroup(original.id),
|
||||
})
|
||||
.await
|
||||
.context("failed to query for existing UserGroup permissions")?
|
||||
.into_iter()
|
||||
.map(|mut p| {
|
||||
// replace the ids with names
|
||||
match &mut p.resource_target {
|
||||
ResourceTarget::System(_) => {}
|
||||
ResourceTarget::Build(id) => {
|
||||
*id = id_to_build()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
*id = id_to_builder()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
*id = id_to_deployment()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
*id = id_to_server()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
*id = id_to_repo()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
*id = id_to_alerter()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
*id = id_to_procedure()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
*id = id_to_server_template()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
*id = id_to_resource_sync()
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
PermissionToml {
|
||||
target: p.resource_target,
|
||||
level: p.level,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
original_users.sort();
|
||||
user_group.users.sort();
|
||||
|
||||
user_group.permissions.sort_by(sort_permissions);
|
||||
original_permissions.sort_by(sort_permissions);
|
||||
|
||||
let update_users = user_group.users != original_users;
|
||||
let update_permissions =
|
||||
user_group.permissions != original_permissions;
|
||||
|
||||
// only push update after failed diff
|
||||
if update_users || update_permissions {
|
||||
println!(
|
||||
"\n{}: user group: '{}'\n-------------------",
|
||||
"UPDATE".blue(),
|
||||
user_group.name.bold(),
|
||||
);
|
||||
let mut lines = Vec::<String>::new();
|
||||
if update_users {
|
||||
let adding = user_group
|
||||
.users
|
||||
.iter()
|
||||
.filter(|user| !original_users.contains(user))
|
||||
.map(|user| user.as_str())
|
||||
.collect::<Vec<_>>();
|
||||
let adding = if adding.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
adding.join(", ").green()
|
||||
};
|
||||
let removing = original_users
|
||||
.iter()
|
||||
.filter(|user| !user_group.users.contains(user))
|
||||
.map(|user| user.as_str())
|
||||
.collect::<Vec<_>>();
|
||||
let removing = if removing.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
removing.join(", ").red()
|
||||
};
|
||||
lines.push(format!(
|
||||
"{}: 'users'\n{}: {removing}\n{}: {adding}",
|
||||
"field".dimmed(),
|
||||
"removing".dimmed(),
|
||||
"adding".dimmed(),
|
||||
))
|
||||
}
|
||||
if update_permissions {
|
||||
let adding = user_group
|
||||
.permissions
|
||||
.iter()
|
||||
.filter(|permission| {
|
||||
!original_permissions.contains(permission)
|
||||
})
|
||||
.map(|permission| format!("{permission:?}"))
|
||||
.collect::<Vec<_>>();
|
||||
let adding = if adding.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
adding.join(", ").green()
|
||||
};
|
||||
let removing = original_permissions
|
||||
.iter()
|
||||
.filter(|permission| {
|
||||
!user_group.permissions.contains(permission)
|
||||
})
|
||||
.map(|permission| format!("{permission:?}"))
|
||||
.collect::<Vec<_>>();
|
||||
let removing = if removing.is_empty() {
|
||||
String::from("None").into()
|
||||
} else {
|
||||
removing.join(", ").red()
|
||||
};
|
||||
lines.push(format!(
|
||||
"{}: 'permissions'\n{}: {removing}\n{}: {adding}",
|
||||
"field".dimmed(),
|
||||
"removing".dimmed(),
|
||||
"adding".dimmed()
|
||||
))
|
||||
}
|
||||
println!("{}", lines.join("\n-------------------\n"));
|
||||
to_update.push(UpdateItem {
|
||||
user_group,
|
||||
update_users,
|
||||
update_permissions,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
for d in &to_delete {
|
||||
println!(
|
||||
"\nUSER GROUPS TO CREATE: {}",
|
||||
to_create
|
||||
.iter()
|
||||
.map(|item| item.name.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
"\n{}: user group: '{}'\n-------------------",
|
||||
"DELETE".red(),
|
||||
d.name.bold(),
|
||||
);
|
||||
}
|
||||
|
||||
if !to_update.is_empty() {
|
||||
println!(
|
||||
"\nUSER GROUPS TO UPDATE: {}",
|
||||
to_update
|
||||
.iter()
|
||||
.map(|item| item.name.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
);
|
||||
}
|
||||
|
||||
Ok((to_create, to_update))
|
||||
Ok((to_create, to_update, to_delete))
|
||||
}
|
||||
|
||||
/// order permissions in deterministic way
|
||||
@@ -172,14 +278,13 @@ fn sort_permissions(
|
||||
|
||||
pub async fn run_updates(
|
||||
to_create: Vec<UserGroupToml>,
|
||||
to_update: Vec<UserGroupToml>,
|
||||
to_update: Vec<UpdateItem>,
|
||||
to_delete: Vec<DeleteItem>,
|
||||
) {
|
||||
let log_after = !to_update.is_empty() || !to_create.is_empty();
|
||||
|
||||
// Create the non-existant user groups
|
||||
for user_group in to_create {
|
||||
// Create the user group
|
||||
if let Err(e) = monitor_client()
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(CreateUserGroup {
|
||||
name: user_group.name.clone(),
|
||||
})
|
||||
@@ -190,43 +295,78 @@ pub async fn run_updates(
|
||||
user_group.name
|
||||
);
|
||||
continue;
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}'",
|
||||
"created".green().bold(),
|
||||
user_group.name.bold(),
|
||||
);
|
||||
};
|
||||
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
update_permissions(user_group.name, user_group.permissions).await;
|
||||
run_update_permissions(user_group.name, user_group.permissions)
|
||||
.await;
|
||||
}
|
||||
|
||||
// Update the existing user groups
|
||||
for user_group in to_update {
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
update_permissions(user_group.name, user_group.permissions).await;
|
||||
for UpdateItem {
|
||||
user_group,
|
||||
update_users,
|
||||
update_permissions,
|
||||
} in to_update
|
||||
{
|
||||
if update_users {
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
}
|
||||
if update_permissions {
|
||||
run_update_permissions(user_group.name, user_group.permissions)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
if log_after {
|
||||
info!("============ user groups synced ✅ ============");
|
||||
}
|
||||
}
|
||||
|
||||
async fn set_users(user_group: String, users: Vec<String>) {
|
||||
if !users.is_empty() {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(SetUsersInUserGroup {
|
||||
user_group: user_group.clone(),
|
||||
users,
|
||||
})
|
||||
for user_group in to_delete {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(DeleteUserGroup { id: user_group.id })
|
||||
.await
|
||||
{
|
||||
warn!("failed to set users in group {user_group} | {e:#}");
|
||||
warn!(
|
||||
"failed to delete user group {} | {e:#}",
|
||||
user_group.name
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}'",
|
||||
"deleted".red().bold(),
|
||||
user_group.name.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_permissions(
|
||||
async fn set_users(user_group: String, users: Vec<String>) {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(SetUsersInUserGroup {
|
||||
user_group: user_group.clone(),
|
||||
users,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to set users in group {user_group} | {e:#}");
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}' users",
|
||||
"updated".blue().bold(),
|
||||
user_group.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_update_permissions(
|
||||
user_group: String,
|
||||
permissions: Vec<PermissionToml>,
|
||||
) {
|
||||
for PermissionToml { target, level } in permissions {
|
||||
if let Err(e) = monitor_client()
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(UpdatePermissionOnTarget {
|
||||
user_target: UserTarget::UserGroup(user_group.clone()),
|
||||
resource_target: target.clone(),
|
||||
@@ -237,6 +377,12 @@ async fn update_permissions(
|
||||
warn!(
|
||||
"failed to set permssion in group {user_group} | target: {target:?} | {e:#}",
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} user group '{}' permissions",
|
||||
"updated".blue().bold(),
|
||||
user_group.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
196
bin/cli/src/sync/variables.rs
Normal file
196
bin/cli/src/sync/variables.rs
Normal file
@@ -0,0 +1,196 @@
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateVariable, DeleteVariable, UpdateVariableDescription,
|
||||
UpdateVariableValue,
|
||||
},
|
||||
entities::variable::Variable,
|
||||
};
|
||||
|
||||
use crate::{maps::name_to_variable, state::monitor_client};
|
||||
|
||||
pub struct ToUpdateItem {
|
||||
pub variable: Variable,
|
||||
pub update_value: bool,
|
||||
pub update_description: bool,
|
||||
}
|
||||
|
||||
pub fn get_updates(
|
||||
variables: Vec<Variable>,
|
||||
delete: bool,
|
||||
) -> anyhow::Result<(Vec<Variable>, Vec<ToUpdateItem>, Vec<String>)> {
|
||||
let map = name_to_variable();
|
||||
|
||||
let mut to_create = Vec::<Variable>::new();
|
||||
let mut to_update = Vec::<ToUpdateItem>::new();
|
||||
let mut to_delete = Vec::<String>::new();
|
||||
|
||||
if delete {
|
||||
for variable in map.values() {
|
||||
if !variables.iter().any(|v| v.name == variable.name) {
|
||||
to_delete.push(variable.name.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for variable in variables {
|
||||
match map.get(&variable.name) {
|
||||
Some(original) => {
|
||||
let item = ToUpdateItem {
|
||||
update_value: original.value != variable.value,
|
||||
update_description: original.description
|
||||
!= variable.description,
|
||||
variable,
|
||||
};
|
||||
if !item.update_value && !item.update_description {
|
||||
continue;
|
||||
}
|
||||
println!(
|
||||
"\n{}: variable: '{}'\n-------------------",
|
||||
"UPDATE".blue(),
|
||||
item.variable.name.bold(),
|
||||
);
|
||||
|
||||
let mut lines = Vec::<String>::new();
|
||||
|
||||
if item.update_value {
|
||||
lines.push(format!(
|
||||
"{}: 'value'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
original.value.red(),
|
||||
"to".dimmed(),
|
||||
item.variable.value.green()
|
||||
))
|
||||
}
|
||||
|
||||
if item.update_description {
|
||||
lines.push(format!(
|
||||
"{}: 'description'\n{}: {}\n{}: {}",
|
||||
"field".dimmed(),
|
||||
"from".dimmed(),
|
||||
original.description.red(),
|
||||
"to".dimmed(),
|
||||
item.variable.description.green()
|
||||
))
|
||||
}
|
||||
|
||||
println!("{}", lines.join("\n-------------------\n"));
|
||||
|
||||
to_update.push(item);
|
||||
}
|
||||
None => {
|
||||
println!(
|
||||
"\n{}: variable: {}\n{}: {}\n{}: {}",
|
||||
"CREATE".green(),
|
||||
variable.name.bold().green(),
|
||||
"description".dimmed(),
|
||||
variable.description,
|
||||
"value".dimmed(),
|
||||
variable.value,
|
||||
);
|
||||
to_create.push(variable)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for name in &to_delete {
|
||||
println!(
|
||||
"\n{}: variable: '{}'\n-------------------",
|
||||
"DELETE".red(),
|
||||
name.bold(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok((to_create, to_update, to_delete))
|
||||
}
|
||||
|
||||
pub async fn run_updates(
|
||||
to_create: Vec<Variable>,
|
||||
to_update: Vec<ToUpdateItem>,
|
||||
to_delete: Vec<String>,
|
||||
) {
|
||||
for variable in to_create {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(CreateVariable {
|
||||
name: variable.name.clone(),
|
||||
value: variable.value,
|
||||
description: variable.description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to create variable {} | {e:#}", variable.name);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}'",
|
||||
"created".green().bold(),
|
||||
variable.name.bold(),
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
for ToUpdateItem {
|
||||
variable,
|
||||
update_value,
|
||||
update_description,
|
||||
} in to_update
|
||||
{
|
||||
if update_value {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdateVariableValue {
|
||||
name: variable.name.clone(),
|
||||
value: variable.value,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update variable value for {} | {e:#}",
|
||||
variable.name
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}' value",
|
||||
"updated".blue().bold(),
|
||||
variable.name.bold(),
|
||||
);
|
||||
};
|
||||
}
|
||||
if update_description {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdateVariableDescription {
|
||||
name: variable.name.clone(),
|
||||
description: variable.description,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update variable description for {} | {e:#}",
|
||||
variable.name
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}' description",
|
||||
"updated".blue().bold(),
|
||||
variable.name.bold(),
|
||||
);
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
for variable in to_delete {
|
||||
if let Err(e) = crate::state::monitor_client()
|
||||
.write(DeleteVariable {
|
||||
name: variable.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to delete variable {variable} | {e:#}",);
|
||||
} else {
|
||||
info!(
|
||||
"{} variable '{}'",
|
||||
"deleted".red().bold(),
|
||||
variable.bold(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,8 @@ version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "core"
|
||||
@@ -13,23 +15,25 @@ path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client = { workspace = true, features = ["mongo"] }
|
||||
periphery_client.workspace = true
|
||||
monitor_client.workspace = true
|
||||
logger.workspace = true
|
||||
git.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
merge_config_files.workspace = true
|
||||
termination_signal.workspace = true
|
||||
async_timing_util.workspace = true
|
||||
partial_derive2.workspace = true
|
||||
derive_variants.workspace = true
|
||||
mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
parse_csl.workspace = true
|
||||
mungos.workspace = true
|
||||
serror.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
urlencoding.workspace = true
|
||||
async-trait.workspace = true
|
||||
aws-sdk-ec2.workspace = true
|
||||
aws-config.workspace = true
|
||||
tokio-util.workspace = true
|
||||
@@ -43,6 +47,7 @@ futures.workspace = true
|
||||
anyhow.workspace = true
|
||||
dotenv.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
tokio.workspace = true
|
||||
tower.workspace = true
|
||||
serde.workspace = true
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Build Core
|
||||
FROM rust:1.78.0-bullseye as core-builder
|
||||
FROM rust:1.78.0-bookworm as core-builder
|
||||
WORKDIR /builder
|
||||
COPY . .
|
||||
RUN cargo build -p monitor_core --release
|
||||
@@ -13,9 +13,8 @@ RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link @monitor/client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
# FROM gcr.io/distroless/cc
|
||||
FROM debian:bullseye-slim
|
||||
RUN apt update && apt install -y ca-certificates
|
||||
FROM debian:bookworm-slim
|
||||
RUN apt update && apt install -y git ca-certificates
|
||||
COPY ./config_example/core.config.example.toml /config/config.toml
|
||||
COPY --from=core-builder /builder/target/release/core /
|
||||
COPY --from=frontend-builder /builder/frontend/dist /frontend
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::{sync::OnceLock, time::Instant};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use axum::{http::HeaderMap, routing::post, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::auth::*, entities::user::User};
|
||||
@@ -92,7 +91,6 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
})
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetLoginOptions, HeaderMap> for State {
|
||||
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
@@ -104,7 +102,6 @@ impl Resolve<GetLoginOptions, HeaderMap> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ExchangeForJwt, HeaderMap> for State {
|
||||
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
@@ -118,7 +115,6 @@ impl Resolve<ExchangeForJwt, HeaderMap> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetUser, HeaderMap> for State {
|
||||
#[instrument(name = "GetUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::time::Duration;
|
||||
use std::{collections::HashSet, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
api::execute::{
|
||||
@@ -9,29 +8,31 @@ use monitor_client::{
|
||||
},
|
||||
entities::{
|
||||
all_logs_success,
|
||||
build::Build,
|
||||
build::{Build, CloudRegistryConfig, ImageRegistry},
|
||||
builder::{AwsBuilderConfig, Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
server_template::AwsServerTemplateConfig,
|
||||
server_template::aws::AwsServerTemplateConfig,
|
||||
update::{Log, Update},
|
||||
user::{auto_redeploy_user, User},
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, to_bson, to_document},
|
||||
mongodb::{
|
||||
bson::{doc, to_bson, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::{
|
||||
api::{self, GetVersionResponse},
|
||||
PeripheryClient,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serror::{serialize_error, serialize_error_pretty};
|
||||
use serror::serialize_error_pretty;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{
|
||||
@@ -46,20 +47,23 @@ use crate::{
|
||||
helpers::{
|
||||
channel::build_cancel_channel,
|
||||
periphery_client,
|
||||
query::get_deployment_state,
|
||||
update::{add_update, make_update, update_update},
|
||||
query::{get_deployment_state, get_global_variables},
|
||||
update::update_update,
|
||||
},
|
||||
resource::{self, refresh_build_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RunBuild, User> for State {
|
||||
use crate::helpers::update::init_execution_update;
|
||||
|
||||
use super::ExecuteRequest;
|
||||
|
||||
impl Resolve<RunBuild, (User, Update)> for State {
|
||||
#[instrument(name = "RunBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RunBuild { build }: RunBuild,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
@@ -68,6 +72,9 @@ impl Resolve<RunBuild, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let registry_token =
|
||||
validate_account_extract_registry_token(&build)?;
|
||||
|
||||
// get the action state for the build (or insert default).
|
||||
let action_state =
|
||||
action_states().build.get_or_insert_default(&build.id).await;
|
||||
@@ -78,10 +85,8 @@ impl Resolve<RunBuild, User> for State {
|
||||
action_state.update(|state| state.building = true)?;
|
||||
|
||||
build.config.version.increment();
|
||||
|
||||
let mut update = make_update(&build, Operation::RunBuild, &user);
|
||||
update.in_progress();
|
||||
update.version = build.config.version.clone();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
@@ -115,13 +120,11 @@ impl Resolve<RunBuild, User> for State {
|
||||
anyhow::Ok(())
|
||||
};
|
||||
tokio::select! {
|
||||
_ = cancel_clone.cancelled() => {}
|
||||
_ = poll => {}
|
||||
_ = cancel_clone.cancelled() => {}
|
||||
_ = poll => {}
|
||||
}
|
||||
});
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) =
|
||||
@@ -141,6 +144,7 @@ impl Resolve<RunBuild, User> for State {
|
||||
};
|
||||
|
||||
let core_config = core_config();
|
||||
let variables = get_global_variables().await?;
|
||||
|
||||
// CLONE REPO
|
||||
|
||||
@@ -172,23 +176,67 @@ impl Resolve<RunBuild, User> for State {
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed build at clone repo | {e:#}");
|
||||
update.push_error_log("clone repo", serialize_error(&e));
|
||||
update
|
||||
.push_error_log("clone repo", serialize_error_pretty(&e));
|
||||
}
|
||||
}
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if all_logs_success(&update.logs) {
|
||||
let docker_token = core_config
|
||||
.docker_accounts
|
||||
.get(&build.config.docker_account)
|
||||
.cloned();
|
||||
// Interpolate variables / secrets into build args
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
for arg in &mut build.config.build_args {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&arg.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers.extend(more_replacers);
|
||||
arg.value = res;
|
||||
}
|
||||
|
||||
// Show which variables were interpolated
|
||||
if !global_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate global variables",
|
||||
global_replacers
|
||||
.into_iter()
|
||||
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
if !secret_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate core secrets",
|
||||
secret_replacers
|
||||
.iter()
|
||||
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
docker_token,
|
||||
registry_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
@@ -206,7 +254,7 @@ impl Resolve<RunBuild, User> for State {
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("error in build | {e:#}");
|
||||
update.push_error_log("build", serialize_error(&e))
|
||||
update.push_error_log("build", serialize_error_pretty(&e))
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -232,6 +280,7 @@ impl Resolve<RunBuild, User> for State {
|
||||
.await;
|
||||
}
|
||||
|
||||
// stop the cancel listening task from going forever
|
||||
cancel.cancel();
|
||||
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
@@ -266,6 +315,7 @@ impl Resolve<RunBuild, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(update))]
|
||||
async fn handle_early_return(
|
||||
mut update: Update,
|
||||
) -> anyhow::Result<Update> {
|
||||
@@ -288,13 +338,54 @@ async fn handle_early_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CancelBuild, User> for State {
|
||||
pub async fn validate_cancel_build(
|
||||
request: &ExecuteRequest,
|
||||
) -> anyhow::Result<()> {
|
||||
if let ExecuteRequest::CancelBuild(req) = request {
|
||||
let build = resource::get::<Build>(&req.build).await?;
|
||||
|
||||
let db = db_client().await;
|
||||
|
||||
let (latest_build, latest_cancel) = tokio::try_join!(
|
||||
db.updates.find_one(
|
||||
doc! {
|
||||
"operation": "RunBuild",
|
||||
"target.id": &build.id,
|
||||
},
|
||||
FindOneOptions::builder()
|
||||
.sort(doc! { "start_ts": -1 })
|
||||
.build(),
|
||||
),
|
||||
db.updates.find_one(
|
||||
doc! {
|
||||
"operation": "CancelBuild",
|
||||
"target.id": &build.id,
|
||||
},
|
||||
FindOneOptions::builder()
|
||||
.sort(doc! { "start_ts": -1 })
|
||||
.build(),
|
||||
)
|
||||
)?;
|
||||
|
||||
match (latest_build, latest_cancel) {
|
||||
(Some(build), Some(cancel)) => {
|
||||
if cancel.start_ts > build.start_ts {
|
||||
return Err(anyhow!("Build has already been cancelled"));
|
||||
}
|
||||
}
|
||||
(None, _) => return Err(anyhow!("No build in progress")),
|
||||
_ => {}
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl Resolve<CancelBuild, (User, Update)> for State {
|
||||
#[instrument(name = "CancelBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CancelBuild { build }: CancelBuild,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<CancelBuildResponse> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
@@ -303,37 +394,24 @@ impl Resolve<CancelBuild, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
// check if theres already an open cancel build update
|
||||
if db_client()
|
||||
// make sure the build is building
|
||||
if !action_states()
|
||||
.build
|
||||
.get(&build.id)
|
||||
.await
|
||||
.updates
|
||||
.find_one(
|
||||
doc! {
|
||||
"operation": "CancelBuild",
|
||||
"status": "InProgress",
|
||||
"target.id": &build.id,
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query updates")?
|
||||
.is_some()
|
||||
.and_then(|s| s.get().ok().map(|s| s.building))
|
||||
.unwrap_or_default()
|
||||
{
|
||||
return Err(anyhow!("Build cancel is already in progress"));
|
||||
return Err(anyhow!("Build is not building."));
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&build, Operation::CancelBuild, &user);
|
||||
|
||||
update.push_simple_log(
|
||||
"cancel triggered",
|
||||
"the build cancel has been triggered",
|
||||
);
|
||||
update.in_progress();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
update.id =
|
||||
add_update(make_update(&build, Operation::CancelBuild, &user))
|
||||
.await?;
|
||||
let update_id = update.id.clone();
|
||||
|
||||
build_cancel_channel()
|
||||
.sender
|
||||
@@ -341,6 +419,22 @@ impl Resolve<CancelBuild, User> for State {
|
||||
.await
|
||||
.send((build.id, update))?;
|
||||
|
||||
// Make sure cancel is set to complete after some time in case
|
||||
// no reciever is there to do it. Prevents update stuck in InProgress.
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_secs(60)).await;
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&update_id,
|
||||
doc! { "$set": { "status": "Complete" } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!("failed to set BuildCancel Update status Complete after timeout | {e:#}")
|
||||
}
|
||||
});
|
||||
|
||||
Ok(CancelBuildResponse {})
|
||||
}
|
||||
}
|
||||
@@ -348,7 +442,7 @@ impl Resolve<CancelBuild, User> for State {
|
||||
const BUILDER_POLL_RATE_SECS: u64 = 2;
|
||||
const BUILDER_POLL_MAX_TRIES: usize = 30;
|
||||
|
||||
#[instrument]
|
||||
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
|
||||
async fn get_build_builder(
|
||||
build: &Build,
|
||||
update: &mut Update,
|
||||
@@ -378,7 +472,7 @@ async fn get_build_builder(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument(skip_all, fields(build_id = build.id, update_id = update.id))]
|
||||
async fn get_aws_builder(
|
||||
build: &Build,
|
||||
config: AwsBuilderConfig,
|
||||
@@ -448,6 +542,8 @@ async fn get_aws_builder(
|
||||
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
|
||||
.await;
|
||||
}
|
||||
|
||||
// Spawn terminate task in failure case (if loop is passed without return)
|
||||
tokio::spawn(async move {
|
||||
let _ =
|
||||
terminate_ec2_instance_with_retry(config.region, &instance_id)
|
||||
@@ -455,10 +551,14 @@ async fn get_aws_builder(
|
||||
});
|
||||
|
||||
// Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
|
||||
Err(res.err().unwrap())
|
||||
Err(
|
||||
res.err().unwrap().context(
|
||||
"failed to start usable builder. terminating instance.",
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(skip(periphery))]
|
||||
#[instrument(skip(periphery, update))]
|
||||
async fn cleanup_builder_instance(
|
||||
periphery: PeripheryClient,
|
||||
cleanup_data: BuildCleanupData,
|
||||
@@ -510,16 +610,26 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let state =
|
||||
get_deployment_state(&deployment).await.unwrap_or_default();
|
||||
if state == DeploymentState::Running {
|
||||
let res = State
|
||||
.resolve(
|
||||
Deploy {
|
||||
deployment: deployment.id.clone(),
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
},
|
||||
auto_redeploy_user().to_owned(),
|
||||
)
|
||||
.await;
|
||||
let req = super::ExecuteRequest::Deploy(Deploy {
|
||||
deployment: deployment.id.clone(),
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
});
|
||||
let user = auto_redeploy_user().to_owned();
|
||||
let res = async {
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
State
|
||||
.resolve(
|
||||
Deploy {
|
||||
deployment: deployment.id.clone(),
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
},
|
||||
(user, update),
|
||||
)
|
||||
.await
|
||||
}
|
||||
.await;
|
||||
Some((deployment.id.clone(), res))
|
||||
} else {
|
||||
None
|
||||
@@ -538,7 +648,8 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let (id, res) = res.unwrap();
|
||||
match res {
|
||||
Ok(_) => redeploys.push(id),
|
||||
Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
|
||||
Err(e) => redeploy_failures
|
||||
.push(format!("{id}: {}", serialize_error_pretty(&e))),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -563,3 +674,32 @@ fn start_aws_builder_log(
|
||||
|
||||
format!("instance id: {instance_id}\nip: {ip}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}\nassign public ip: {assign_public_ip}\nuse public ip: {use_public_ip}")
|
||||
}
|
||||
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token matching requirements (otherwise it is left to periphery)
|
||||
fn validate_account_extract_registry_token(
|
||||
build: &Build,
|
||||
) -> anyhow::Result<Option<String>> {
|
||||
match &build.config.image_registry {
|
||||
ImageRegistry::None(_) => Ok(None),
|
||||
ImageRegistry::DockerHub(CloudRegistryConfig {
|
||||
account, ..
|
||||
}) => {
|
||||
if account.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Must attach account to use DockerHub image registry"
|
||||
));
|
||||
}
|
||||
Ok(core_config().docker_accounts.get(account).cloned())
|
||||
}
|
||||
ImageRegistry::Ghcr(CloudRegistryConfig { account, .. }) => {
|
||||
if account.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Must attach account to use GithubContainerRegistry"
|
||||
));
|
||||
}
|
||||
Ok(core_config().github_accounts.get(account).cloned())
|
||||
}
|
||||
ImageRegistry::Custom(_) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
build::Build,
|
||||
build::{Build, ImageRegistry},
|
||||
deployment::{Deployment, DeploymentImage},
|
||||
get_image_name, monitor_timestamp,
|
||||
get_image_name,
|
||||
permission::PermissionLevel,
|
||||
server::ServerState,
|
||||
update::{Log, ResourceTarget, Update, UpdateStatus},
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
Operation, Version,
|
||||
Version,
|
||||
},
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
@@ -23,16 +24,17 @@ use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::get_server_with_status,
|
||||
update::{add_update, make_update, update_update},
|
||||
query::{get_global_variables, get_server_with_status},
|
||||
update::update_update,
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<Deploy, User> for State {
|
||||
use crate::helpers::update::init_execution_update;
|
||||
|
||||
impl Resolve<Deploy, (User, Update)> for State {
|
||||
#[instrument(name = "Deploy", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -41,7 +43,7 @@ impl Resolve<Deploy, User> for State {
|
||||
stop_signal,
|
||||
stop_time,
|
||||
}: Deploy,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let mut deployment =
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
@@ -51,6 +53,10 @@ impl Resolve<Deploy, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
@@ -62,10 +68,6 @@ impl Resolve<Deploy, User> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.deploying = true)?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
@@ -76,6 +78,8 @@ impl Resolve<Deploy, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
// This block gets the version of the image to deploy in the Build case.
|
||||
// It also gets the name of the image from the build and attaches it directly.
|
||||
let version = match deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
@@ -85,36 +89,98 @@ impl Resolve<Deploy, User> for State {
|
||||
} else {
|
||||
version
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version}"),
|
||||
};
|
||||
if deployment.config.docker_account.is_empty() {
|
||||
deployment.config.docker_account =
|
||||
build.config.docker_account;
|
||||
// set image registry to match build docker account if it's not overridden by deployment
|
||||
if matches!(
|
||||
&deployment.config.image_registry,
|
||||
ImageRegistry::None(_)
|
||||
) {
|
||||
deployment.config.image_registry =
|
||||
build.config.image_registry;
|
||||
}
|
||||
version
|
||||
}
|
||||
DeploymentImage::Image { .. } => Version::default(),
|
||||
};
|
||||
|
||||
let mut update =
|
||||
make_update(&deployment, Operation::DeployContainer, &user);
|
||||
update.in_progress();
|
||||
let variables = get_global_variables().await?;
|
||||
let core_config = core_config();
|
||||
|
||||
// Interpolate variables into environment
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
for env in &mut deployment.config.environment {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&env.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers.extend(more_replacers);
|
||||
|
||||
// set env value with the result
|
||||
env.value = res;
|
||||
}
|
||||
|
||||
// Show which variables were interpolated
|
||||
if !global_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate global variables",
|
||||
global_replacers
|
||||
.into_iter()
|
||||
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
if !secret_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate core secrets",
|
||||
secret_replacers
|
||||
.iter()
|
||||
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
update.version = version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let docker_token = core_config()
|
||||
.docker_accounts
|
||||
.get(&deployment.config.docker_account)
|
||||
.cloned();
|
||||
let registry_token = match &deployment.config.image_registry {
|
||||
ImageRegistry::None(_) => None,
|
||||
ImageRegistry::DockerHub(params) => {
|
||||
core_config.docker_accounts.get(¶ms.account).cloned()
|
||||
}
|
||||
ImageRegistry::Ghcr(params) => {
|
||||
core_config.github_accounts.get(¶ms.account).cloned()
|
||||
}
|
||||
ImageRegistry::Custom(_) => {
|
||||
return Err(anyhow!("Custom ImageRegistry not yet supported"))
|
||||
}
|
||||
};
|
||||
|
||||
match periphery
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal,
|
||||
stop_time,
|
||||
docker_token,
|
||||
registry_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
@@ -136,13 +202,12 @@ impl Resolve<Deploy, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<StartContainer, User> for State {
|
||||
impl Resolve<StartContainer, (User, Update)> for State {
|
||||
#[instrument(name = "StartContainer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StartContainer { deployment }: StartContainer,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
@@ -176,20 +241,6 @@ impl Resolve<StartContainer, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
let mut update = Update {
|
||||
target: ResourceTarget::Deployment(deployment.id.clone()),
|
||||
operation: Operation::StartContainer,
|
||||
start_ts,
|
||||
status: UpdateStatus::InProgress,
|
||||
success: true,
|
||||
operator: user.id.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name.clone(),
|
||||
@@ -203,16 +254,15 @@ impl Resolve<StartContainer, User> for State {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<StopContainer, User> for State {
|
||||
impl Resolve<StopContainer, (User, Update)> for State {
|
||||
#[instrument(name = "StopContainer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -221,7 +271,7 @@ impl Resolve<StopContainer, User> for State {
|
||||
signal,
|
||||
time,
|
||||
}: StopContainer,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
@@ -255,11 +305,6 @@ impl Resolve<StopContainer, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&deployment, Operation::StopContainer, &user);
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StopContainer {
|
||||
name: deployment.name.clone(),
|
||||
@@ -279,21 +324,20 @@ impl Resolve<StopContainer, User> for State {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<StopAllContainers, User> for State {
|
||||
impl Resolve<StopAllContainers, (User, Update)> for State {
|
||||
#[instrument(name = "StopAllContainers", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StopAllContainers { server }: StopAllContainers,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let (server, status) = get_server_with_status(&server).await?;
|
||||
if status != ServerState::Ok {
|
||||
@@ -323,23 +367,27 @@ impl Resolve<StopAllContainers, User> for State {
|
||||
.await
|
||||
.context("failed to find deployments on server")?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::StopAllContainers, &user);
|
||||
update.in_progress();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let futures = deployments.iter().map(|deployment| async {
|
||||
let req = super::ExecuteRequest::StopContainer(StopContainer {
|
||||
deployment: deployment.id.clone(),
|
||||
signal: None,
|
||||
time: None,
|
||||
});
|
||||
(
|
||||
self
|
||||
.resolve(
|
||||
StopContainer {
|
||||
deployment: deployment.id.clone(),
|
||||
signal: None,
|
||||
time: None,
|
||||
},
|
||||
user.clone(),
|
||||
)
|
||||
.await,
|
||||
async {
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
State
|
||||
.resolve(
|
||||
StopContainer {
|
||||
deployment: deployment.id.clone(),
|
||||
signal: None,
|
||||
time: None,
|
||||
},
|
||||
(user.clone(), update),
|
||||
)
|
||||
.await
|
||||
}
|
||||
.await,
|
||||
deployment.name.clone(),
|
||||
deployment.id.clone(),
|
||||
)
|
||||
@@ -370,8 +418,7 @@ impl Resolve<StopAllContainers, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RemoveContainer, User> for State {
|
||||
impl Resolve<RemoveContainer, (User, Update)> for State {
|
||||
#[instrument(name = "RemoveContainer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -380,7 +427,7 @@ impl Resolve<RemoveContainer, User> for State {
|
||||
signal,
|
||||
time,
|
||||
}: RemoveContainer,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
@@ -414,20 +461,6 @@ impl Resolve<RemoveContainer, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
let mut update = Update {
|
||||
target: ResourceTarget::Deployment(deployment.id.clone()),
|
||||
operation: Operation::RemoveContainer,
|
||||
start_ts,
|
||||
status: UpdateStatus::InProgress,
|
||||
success: true,
|
||||
operator: user.id.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name.clone(),
|
||||
|
||||
@@ -2,15 +2,25 @@ use std::time::Instant;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::execute::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::by_id::find_one_by_id;
|
||||
use resolver_api::{derive::Resolver, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use serror::{serialize_error_pretty, Json};
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{auth::auth_request, state::State};
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
helpers::update::{init_execution_update, update_update},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
mod build;
|
||||
mod deployment;
|
||||
@@ -18,17 +28,18 @@ mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod sync;
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
#[resolver_target(State)]
|
||||
#[resolver_args(User)]
|
||||
#[resolver_args((User, Update))]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
enum ExecuteRequest {
|
||||
pub enum ExecuteRequest {
|
||||
// ==== SERVER ====
|
||||
PruneContainers(PruneDockerContainers),
|
||||
PruneImages(PruneDockerImages),
|
||||
PruneNetworks(PruneDockerNetworks),
|
||||
PruneContainers(PruneContainers),
|
||||
PruneImages(PruneImages),
|
||||
PruneNetworks(PruneNetworks),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
Deploy(Deploy),
|
||||
@@ -50,6 +61,9 @@ enum ExecuteRequest {
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
LaunchServer(LaunchServer),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -61,25 +75,58 @@ pub fn router() -> Router {
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteRequest>,
|
||||
) -> serror::Result<(TypedHeader<ContentType>, String)> {
|
||||
) -> serror::Result<Json<Update>> {
|
||||
let req_id = Uuid::new_v4();
|
||||
|
||||
let res = tokio::spawn(task(req_id, request, user))
|
||||
.await
|
||||
.context("failure in spawned execute task");
|
||||
// need to validate no cancel is active before any update is created.
|
||||
build::validate_cancel_build(&request).await?;
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/execute request {req_id} spawn error: {e:#}",);
|
||||
}
|
||||
let update = init_execution_update(&request, &user).await?;
|
||||
|
||||
Ok((TypedHeader(ContentType::json()), res??))
|
||||
let handle =
|
||||
tokio::spawn(task(req_id, request, user, update.clone()));
|
||||
|
||||
tokio::spawn({
|
||||
let update_id = update.id.clone();
|
||||
async move {
|
||||
let log = match handle.await {
|
||||
Ok(Err(e)) => {
|
||||
warn!("/execute request {req_id} task error: {e:#}",);
|
||||
Log::error("task error", serialize_error_pretty(&e))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("/execute request {req_id} spawn error: {e:?}",);
|
||||
Log::error("spawn error", format!("{e:#?}"))
|
||||
}
|
||||
_ => return,
|
||||
};
|
||||
let res = async {
|
||||
let mut update =
|
||||
find_one_by_id(&db_client().await.updates, &update_id)
|
||||
.await
|
||||
.context("failed to query to db")?
|
||||
.context("no update exists with given id")?;
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_update(update).await
|
||||
}
|
||||
.await;
|
||||
|
||||
if let Err(e) = res {
|
||||
warn!("failed to update update with task error log | {e:#}");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(Json(update))
|
||||
}
|
||||
|
||||
#[instrument(name = "ExecuteRequest", skip(user))]
|
||||
#[instrument(name = "ExecuteRequest", skip(user, update))]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: ExecuteRequest,
|
||||
user: User,
|
||||
update: Update,
|
||||
) -> anyhow::Result<String> {
|
||||
info!(
|
||||
"/execute request {req_id} | user: {} ({})",
|
||||
@@ -87,16 +134,15 @@ async fn task(
|
||||
);
|
||||
let timer = Instant::now();
|
||||
|
||||
let res =
|
||||
State
|
||||
.resolve_request(request, user)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
resolver_api::Error::Serialization(e) => {
|
||||
anyhow!("{e:?}").context("response serialization error")
|
||||
}
|
||||
resolver_api::Error::Inner(e) => e,
|
||||
});
|
||||
let res = State
|
||||
.resolve_request(request, (user, update))
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
resolver_api::Error::Serialization(e) => {
|
||||
anyhow!("{e:?}").context("response serialization error")
|
||||
}
|
||||
resolver_api::Error::Inner(e) => e,
|
||||
});
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/execute request {req_id} error: {e:#}");
|
||||
|
||||
@@ -1,30 +1,44 @@
|
||||
use async_trait::async_trait;
|
||||
use std::pin::Pin;
|
||||
|
||||
use monitor_client::{
|
||||
api::execute::RunProcedure,
|
||||
entities::{
|
||||
permission::PermissionLevel, procedure::Procedure,
|
||||
update::Update, user::User, Operation,
|
||||
update::Update, user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
procedure::execute_procedure,
|
||||
update::{add_update, make_update, update_update},
|
||||
}, resource, state::{action_states, State}
|
||||
helpers::{procedure::execute_procedure, update::update_update},
|
||||
resource::{self, refresh_procedure_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RunProcedure, User> for State {
|
||||
impl Resolve<RunProcedure, (User, Update)> for State {
|
||||
#[instrument(name = "RunProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RunProcedure { procedure }: RunProcedure,
|
||||
user: User,
|
||||
(user, update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
resolve_inner(procedure, user, update).await
|
||||
}
|
||||
}
|
||||
|
||||
fn resolve_inner(
|
||||
procedure: String,
|
||||
user: User,
|
||||
mut update: Update,
|
||||
) -> Pin<
|
||||
Box<
|
||||
dyn std::future::Future<Output = anyhow::Result<Update>> + Send,
|
||||
>,
|
||||
> {
|
||||
Box::pin(async move {
|
||||
let procedure = resource::get_check_permissions::<Procedure>(
|
||||
&procedure,
|
||||
&user,
|
||||
@@ -32,6 +46,14 @@ impl Resolve<RunProcedure, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Need to push the initial log, as execute_procedure
|
||||
// assumes first log is already created
|
||||
// and will panic otherwise.
|
||||
update.push_simple_log(
|
||||
"execute_procedure",
|
||||
format!("executing procedure {}", procedure.name),
|
||||
);
|
||||
|
||||
// get the action state for the procedure (or insert default).
|
||||
let action_state = action_states()
|
||||
.procedure
|
||||
@@ -43,16 +65,6 @@ impl Resolve<RunProcedure, User> for State {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.running = true)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&procedure, Operation::RunProcedure, &user);
|
||||
update.in_progress();
|
||||
update.push_simple_log(
|
||||
"execute procedure",
|
||||
format!("Executing procedure: {}", procedure.name),
|
||||
);
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let update = Mutex::new(update);
|
||||
|
||||
let res = execute_procedure(&procedure, &update).await;
|
||||
@@ -74,8 +86,23 @@ impl Resolve<RunProcedure, User> for State {
|
||||
|
||||
update.finalize();
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_procedure_state_cache().await;
|
||||
}
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
@@ -7,9 +6,8 @@ use monitor_client::{
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
update::{Log, ResourceTarget, Update, UpdateStatus},
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
@@ -22,21 +20,17 @@ use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
update::{add_update, update_update},
|
||||
},
|
||||
helpers::{periphery_client, update::update_update},
|
||||
resource::{self, refresh_repo_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CloneRepo, User> for State {
|
||||
impl Resolve<CloneRepo, (User, Update)> for State {
|
||||
#[instrument(name = "CloneRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CloneRepo { repo }: CloneRepo,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
@@ -63,20 +57,6 @@ impl Resolve<CloneRepo, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
let mut update = Update {
|
||||
operation: Operation::CloneRepo,
|
||||
target: ResourceTarget::Repo(repo.id.clone()),
|
||||
start_ts,
|
||||
status: UpdateStatus::InProgress,
|
||||
operator: user.id.clone(),
|
||||
success: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let github_token = core_config()
|
||||
.github_accounts
|
||||
.get(&repo.config.github_account)
|
||||
@@ -99,20 +79,19 @@ impl Resolve<CloneRepo, User> for State {
|
||||
update.finalize();
|
||||
|
||||
if update.success {
|
||||
update_last_pulled(&repo.name).await;
|
||||
update_last_pulled_time(&repo.name).await;
|
||||
}
|
||||
|
||||
handle_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<PullRepo, User> for State {
|
||||
impl Resolve<PullRepo, (User, Update)> for State {
|
||||
#[instrument(name = "PullRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PullRepo { repo }: PullRepo,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
@@ -139,20 +118,6 @@ impl Resolve<PullRepo, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
let mut update = Update {
|
||||
operation: Operation::PullRepo,
|
||||
target: ResourceTarget::Repo(repo.id.clone()),
|
||||
start_ts,
|
||||
status: UpdateStatus::InProgress,
|
||||
operator: user.id.clone(),
|
||||
success: true,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let logs = match periphery
|
||||
.request(api::git::PullRepo {
|
||||
name: repo.name.clone(),
|
||||
@@ -173,13 +138,14 @@ impl Resolve<PullRepo, User> for State {
|
||||
update.finalize();
|
||||
|
||||
if update.success {
|
||||
update_last_pulled(&repo.name).await;
|
||||
update_last_pulled_time(&repo.name).await;
|
||||
}
|
||||
|
||||
handle_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all, fields(update_id = update.id))]
|
||||
async fn handle_update_return(
|
||||
update: Update,
|
||||
) -> anyhow::Result<Update> {
|
||||
@@ -201,7 +167,8 @@ async fn handle_update_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
async fn update_last_pulled(repo_name: &str) {
|
||||
#[instrument]
|
||||
async fn update_last_pulled_time(repo_name: &str) {
|
||||
let res = db_client()
|
||||
.await
|
||||
.repos
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
@@ -8,7 +7,6 @@ use monitor_client::{
|
||||
server::Server,
|
||||
update::{Log, Update, UpdateStatus},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
@@ -16,21 +14,17 @@ use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
helpers::{periphery_client, update::update_update},
|
||||
resource,
|
||||
state::{action_states, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<PruneDockerContainers, User> for State {
|
||||
#[instrument(name = "PruneDockerContainers", skip(self, user))]
|
||||
impl Resolve<PruneContainers, (User, Update)> for State {
|
||||
#[instrument(name = "PruneContainers", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PruneDockerContainers { server }: PruneDockerContainers,
|
||||
user: User,
|
||||
PruneContainers { server }: PruneContainers,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
@@ -52,11 +46,6 @@ impl Resolve<PruneDockerContainers, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::PruneContainersServer, &user);
|
||||
update.in_progress();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PruneContainers {})
|
||||
.await
|
||||
@@ -81,13 +70,12 @@ impl Resolve<PruneDockerContainers, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<PruneDockerNetworks, User> for State {
|
||||
#[instrument(name = "PruneDockerNetworks", skip(self, user))]
|
||||
impl Resolve<PruneNetworks, (User, Update)> for State {
|
||||
#[instrument(name = "PruneNetworks", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PruneDockerNetworks { server }: PruneDockerNetworks,
|
||||
user: User,
|
||||
PruneNetworks { server }: PruneNetworks,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
@@ -109,11 +97,6 @@ impl Resolve<PruneDockerNetworks, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::PruneNetworksServer, &user);
|
||||
update.in_progress();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::PruneNetworks {})
|
||||
.await
|
||||
@@ -138,13 +121,12 @@ impl Resolve<PruneDockerNetworks, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<PruneDockerImages, User> for State {
|
||||
#[instrument(name = "PruneDockerImages", skip(self, user))]
|
||||
impl Resolve<PruneImages, (User, Update)> for State {
|
||||
#[instrument(name = "PruneImages", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PruneDockerImages { server }: PruneDockerImages,
|
||||
user: User,
|
||||
PruneImages { server }: PruneImages,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
@@ -166,11 +148,6 @@ impl Resolve<PruneDockerImages, User> for State {
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::PruneImagesServer, &user);
|
||||
update.in_progress();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneImages {}).await {
|
||||
Ok(log) => log,
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::async_trait;
|
||||
use monitor_client::{
|
||||
api::{execute::LaunchServer, write::CreateServer},
|
||||
entities::{
|
||||
@@ -8,7 +7,6 @@ use monitor_client::{
|
||||
server_template::{ServerTemplate, ServerTemplateConfig},
|
||||
update::Update,
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
@@ -16,11 +14,13 @@ use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
cloud::aws::launch_ec2_instance, helpers::update::{add_update, make_update, update_update}, resource, state::{db_client, State}
|
||||
cloud::{aws::launch_ec2_instance, hetzner::launch_hetzner_server},
|
||||
helpers::update::update_update,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<LaunchServer, User> for State {
|
||||
impl Resolve<LaunchServer, (User, Update)> for State {
|
||||
#[instrument(name = "LaunchServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -28,7 +28,7 @@ impl Resolve<LaunchServer, User> for State {
|
||||
name,
|
||||
server_template,
|
||||
}: LaunchServer,
|
||||
user: User,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
// validate name isn't already taken by another server
|
||||
if db_client()
|
||||
@@ -54,29 +54,28 @@ impl Resolve<LaunchServer, User> for State {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&template, Operation::LaunchServer, &user);
|
||||
update.in_progress();
|
||||
update.push_simple_log(
|
||||
"launching server",
|
||||
format!("{:#?}", template.config),
|
||||
);
|
||||
update.id = add_update(update.clone()).await?;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let config = match template.config {
|
||||
ServerTemplateConfig::Aws(config) => {
|
||||
let region = config.region.clone();
|
||||
let instance = launch_ec2_instance(&name, config).await;
|
||||
if let Err(e) = &instance {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch aws instance\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
let instance = instance.unwrap();
|
||||
let instance = match launch_ec2_instance(&name, config).await
|
||||
{
|
||||
Ok(instance) => instance,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch aws instance\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
@@ -90,6 +89,34 @@ impl Resolve<LaunchServer, User> for State {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
ServerTemplateConfig::Hetzner(config) => {
|
||||
let datacenter = config.datacenter;
|
||||
let server = match launch_hetzner_server(&name, config).await
|
||||
{
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch hetzner server\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {name} on ip {}",
|
||||
server.ip
|
||||
),
|
||||
);
|
||||
PartialServerConfig {
|
||||
address: format!("http://{}:8120", server.ip).into(),
|
||||
region: datacenter.as_ref().to_string().into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match self.resolve(CreateServer { name, config }, user).await {
|
||||
@@ -98,6 +125,7 @@ impl Resolve<LaunchServer, User> for State {
|
||||
"create server",
|
||||
format!("created server {} ({})", server.name, server.id),
|
||||
);
|
||||
update.other_data = server.id;
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
|
||||
391
bin/core/src/api/execute/sync.rs
Normal file
391
bin/core/src/api/execute/sync.rs
Normal file
@@ -0,0 +1,391 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
api::{execute::RunSync, write::RefreshResourceSyncPending},
|
||||
entities::{
|
||||
self,
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::Deployment,
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
update::{Log, Update},
|
||||
user::{sync_user, User},
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
query::get_id_to_tags,
|
||||
sync::{
|
||||
colored,
|
||||
resource::{
|
||||
get_updates_for_execution, AllResourcesById, ResourceSync,
|
||||
},
|
||||
},
|
||||
update::update_update,
|
||||
},
|
||||
resource::{self, refresh_resource_sync_state_cache},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<RunSync, (User, Update)> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
RunSync { sync }: RunSync,
|
||||
(user, mut update): (User, Update),
|
||||
) -> anyhow::Result<Update> {
|
||||
let sync = resource::get_check_permissions::<
|
||||
entities::sync::ResourceSync,
|
||||
>(&sync, &user, PermissionLevel::Execute)
|
||||
.await?;
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(anyhow!("resource sync repo not configured"));
|
||||
}
|
||||
|
||||
let (res, logs, hash, message) =
|
||||
crate::helpers::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
|
||||
update.logs.extend(logs);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let resources = res?;
|
||||
|
||||
let all_resources = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
|
||||
let (servers_to_create, servers_to_update, servers_to_delete) =
|
||||
get_updates_for_execution::<Server>(
|
||||
resources.servers,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
) = get_updates_for_execution::<Deployment>(
|
||||
resources.deployments,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (builds_to_create, builds_to_update, builds_to_delete) =
|
||||
get_updates_for_execution::<Build>(
|
||||
resources.builds,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (repos_to_create, repos_to_update, repos_to_delete) =
|
||||
get_updates_for_execution::<Repo>(
|
||||
resources.repos,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
procedures_to_create,
|
||||
procedures_to_update,
|
||||
procedures_to_delete,
|
||||
) = get_updates_for_execution::<Procedure>(
|
||||
resources.procedures,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (builders_to_create, builders_to_update, builders_to_delete) =
|
||||
get_updates_for_execution::<Builder>(
|
||||
resources.builders,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (alerters_to_create, alerters_to_update, alerters_to_delete) =
|
||||
get_updates_for_execution::<Alerter>(
|
||||
resources.alerters,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
server_templates_to_create,
|
||||
server_templates_to_update,
|
||||
server_templates_to_delete,
|
||||
) = get_updates_for_execution::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
resource_syncs_to_create,
|
||||
resource_syncs_to_update,
|
||||
resource_syncs_to_delete,
|
||||
) = get_updates_for_execution::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
variables_to_create,
|
||||
variables_to_update,
|
||||
variables_to_delete,
|
||||
) = crate::helpers::sync::variables::get_updates_for_execution(
|
||||
resources.variables,
|
||||
sync.config.delete,
|
||||
)
|
||||
.await?;
|
||||
let (
|
||||
user_groups_to_create,
|
||||
user_groups_to_update,
|
||||
user_groups_to_delete,
|
||||
) = crate::helpers::sync::user_groups::get_updates_for_execution(
|
||||
resources.user_groups,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if resource_syncs_to_create.is_empty()
|
||||
&& resource_syncs_to_update.is_empty()
|
||||
&& resource_syncs_to_delete.is_empty()
|
||||
&& server_templates_to_create.is_empty()
|
||||
&& server_templates_to_update.is_empty()
|
||||
&& server_templates_to_delete.is_empty()
|
||||
&& servers_to_create.is_empty()
|
||||
&& servers_to_update.is_empty()
|
||||
&& servers_to_delete.is_empty()
|
||||
&& deployments_to_create.is_empty()
|
||||
&& deployments_to_update.is_empty()
|
||||
&& deployments_to_delete.is_empty()
|
||||
&& builds_to_create.is_empty()
|
||||
&& builds_to_update.is_empty()
|
||||
&& builds_to_delete.is_empty()
|
||||
&& builders_to_create.is_empty()
|
||||
&& builders_to_update.is_empty()
|
||||
&& builders_to_delete.is_empty()
|
||||
&& alerters_to_create.is_empty()
|
||||
&& alerters_to_update.is_empty()
|
||||
&& alerters_to_delete.is_empty()
|
||||
&& repos_to_create.is_empty()
|
||||
&& repos_to_update.is_empty()
|
||||
&& repos_to_delete.is_empty()
|
||||
&& procedures_to_create.is_empty()
|
||||
&& procedures_to_update.is_empty()
|
||||
&& procedures_to_delete.is_empty()
|
||||
&& user_groups_to_create.is_empty()
|
||||
&& user_groups_to_update.is_empty()
|
||||
&& user_groups_to_delete.is_empty()
|
||||
&& variables_to_create.is_empty()
|
||||
&& variables_to_update.is_empty()
|
||||
&& variables_to_delete.is_empty()
|
||||
{
|
||||
update.push_simple_log(
|
||||
"No Changes",
|
||||
format!("{}. exiting.", colored("nothing to do", "green")),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
// =================
|
||||
|
||||
// No deps
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
crate::helpers::sync::variables::run_updates(
|
||||
variables_to_create,
|
||||
variables_to_update,
|
||||
variables_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
crate::helpers::sync::user_groups::run_updates(
|
||||
user_groups_to_create,
|
||||
user_groups_to_update,
|
||||
user_groups_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
entities::sync::ResourceSync::run_updates(
|
||||
resource_syncs_to_create,
|
||||
resource_syncs_to_update,
|
||||
resource_syncs_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
ServerTemplate::run_updates(
|
||||
server_templates_to_create,
|
||||
server_templates_to_update,
|
||||
server_templates_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Server::run_updates(
|
||||
servers_to_create,
|
||||
servers_to_update,
|
||||
servers_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Alerter::run_updates(
|
||||
alerters_to_create,
|
||||
alerters_to_update,
|
||||
alerters_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependent on server
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Builder::run_updates(
|
||||
builders_to_create,
|
||||
builders_to_update,
|
||||
builders_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Repo::run_updates(
|
||||
repos_to_create,
|
||||
repos_to_update,
|
||||
repos_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on builder
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Build::run_updates(
|
||||
builds_to_create,
|
||||
builds_to_update,
|
||||
builds_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on server / build
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Deployment::run_updates(
|
||||
deployments_to_create,
|
||||
deployments_to_update,
|
||||
deployments_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
// Dependant on everything
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Procedure::run_updates(
|
||||
procedures_to_create,
|
||||
procedures_to_update,
|
||||
procedures_to_delete,
|
||||
)
|
||||
.await,
|
||||
);
|
||||
|
||||
let db = db_client().await;
|
||||
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db.resource_syncs,
|
||||
&sync.id,
|
||||
doc! {
|
||||
"$set": {
|
||||
"info.last_sync_ts": monitor_timestamp(),
|
||||
"info.last_sync_hash": hash,
|
||||
"info.last_sync_message": message,
|
||||
}
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to update resource sync {} info after sync | {e:#}",
|
||||
sync.name
|
||||
)
|
||||
}
|
||||
|
||||
if let Err(e) = State
|
||||
.resolve(
|
||||
RefreshResourceSyncPending { sync: sync.id },
|
||||
sync_user().to_owned(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!("failed to refresh sync {} after run | {e:#}", sync.name);
|
||||
update.push_error_log(
|
||||
"refresh sync",
|
||||
format!(
|
||||
"failed to refresh sync pending after run | {}",
|
||||
serialize_error_pretty(&e)
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_resource_sync_state_cache().await;
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
/// Append `log` to `logs` when one was produced; a `None` is a no-op.
///
/// `Option<Log>` iterates over zero or one items, so `Vec::extend`
/// expresses the conditional push directly.
fn maybe_extend(logs: &mut Vec<Log>, log: Option<Log>) {
  logs.extend(log);
}
|
||||
@@ -1,4 +1,5 @@
|
||||
pub mod auth;
|
||||
pub mod execute;
|
||||
pub mod read;
|
||||
pub mod user;
|
||||
pub mod write;
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
@@ -14,13 +13,13 @@ use mungos::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const NUM_ALERTS_PER_PAGE: u64 = 20;
|
||||
const NUM_ALERTS_PER_PAGE: u64 = 100;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListAlerts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -28,7 +27,7 @@ impl Resolve<ListAlerts, User> for State {
|
||||
user: User,
|
||||
) -> anyhow::Result<ListAlertsResponse> {
|
||||
let mut query = query.unwrap_or_default();
|
||||
if !user.admin {
|
||||
if !user.admin && !core_config().transparent_mode {
|
||||
let server_ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Server,
|
||||
@@ -71,7 +70,6 @@ impl Resolve<ListAlerts, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetAlert, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -15,12 +14,12 @@ use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetAlerter, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -36,7 +35,6 @@ impl Resolve<GetAlerter, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListAlerters, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -47,14 +45,23 @@ impl Resolve<ListAlerters, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullAlerters, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullAlerters { query }: ListFullAlerters,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullAlertersResponse> {
|
||||
resource::list_full_for_user::<Alerter>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetAlertersSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAlertersSummary {}: GetAlertersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAlertersSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
|
||||
@@ -1,36 +1,33 @@
|
||||
use std::{collections::HashMap, str::FromStr, sync::OnceLock};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::OnceLock,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use async_trait::async_trait;
|
||||
use futures::TryStreamExt;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
build::{Build, BuildActionState, BuildListItem},
|
||||
build::{Build, BuildActionState, BuildListItem, BuildState},
|
||||
permission::PermissionLevel,
|
||||
update::{ResourceTargetVariant, UpdateStatus},
|
||||
update::UpdateStatus,
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{doc, oid::ObjectId},
|
||||
options::FindOptions,
|
||||
},
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::{Resolve, ResolveToString};
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
state::{action_states, build_state_cache, db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetBuild, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -46,7 +43,6 @@ impl Resolve<GetBuild, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListBuilds, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -57,7 +53,16 @@ impl Resolve<ListBuilds, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullBuilds, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullBuilds { query }: ListFullBuilds,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullBuildsResponse> {
|
||||
resource::list_full_for_user::<Build>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetBuildActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -80,45 +85,53 @@ impl Resolve<GetBuildActionState, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetBuildsSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuildsSummary {}: GetBuildsSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetBuildsSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Build,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.builds
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all build documents")?;
|
||||
let res = GetBuildsSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
let builds = resource::list_full_for_user::<Build>(
|
||||
Default::default(),
|
||||
&user,
|
||||
)
|
||||
.await
|
||||
.context("failed to get all builds")?;
|
||||
|
||||
let mut res = GetBuildsSummaryResponse::default();
|
||||
|
||||
let cache = build_state_cache();
|
||||
let action_states = action_states();
|
||||
|
||||
for build in builds {
|
||||
res.total += 1;
|
||||
|
||||
match (
|
||||
cache.get(&build.id).await.unwrap_or_default(),
|
||||
action_states
|
||||
.build
|
||||
.get(&build.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.building => {
|
||||
res.building += 1;
|
||||
}
|
||||
(BuildState::Ok, _) => res.ok += 1,
|
||||
(BuildState::Failed, _) => res.failed += 1,
|
||||
(BuildState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(BuildState::Building, _) => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
const ONE_DAY_MS: i64 = 86400000;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetBuildMonthlyStats, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -180,15 +193,11 @@ fn ms_to_hour(duration: i64) -> f64 {
|
||||
duration as f64 / MS_TO_HOUR_DIVISOR
|
||||
}
|
||||
|
||||
const NUM_VERSIONS_PER_PAGE: u64 = 10;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetBuildVersions, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuildVersions {
|
||||
build,
|
||||
page,
|
||||
major,
|
||||
minor,
|
||||
patch,
|
||||
@@ -224,11 +233,7 @@ impl Resolve<GetBuildVersions, User> for State {
|
||||
let versions = find_collect(
|
||||
&db_client().await.updates,
|
||||
filter,
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "_id": -1 })
|
||||
.limit(NUM_VERSIONS_PER_PAGE as i64)
|
||||
.skip(page as u64 * NUM_VERSIONS_PER_PAGE)
|
||||
.build(),
|
||||
FindOptions::builder().sort(doc! { "_id": -1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to pull versions from mongo")?
|
||||
@@ -241,6 +246,24 @@ impl Resolve<GetBuildVersions, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
fn github_organizations() -> &'static String {
|
||||
static GITHUB_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
|
||||
GITHUB_ORGANIZATIONS.get_or_init(|| {
|
||||
serde_json::to_string(&core_config().github_organizations)
|
||||
.expect("failed to serialize github organizations")
|
||||
})
|
||||
}
|
||||
|
||||
impl ResolveToString<ListGithubOrganizations, User> for State {
  /// Return the pre-serialized JSON list of configured github
  /// organizations. The `User` argument is ignored: the response is
  /// identical for every caller, so no permission filtering applies.
  async fn resolve_to_string(
    &self,
    ListGithubOrganizations {}: ListGithubOrganizations,
    _: User,
  ) -> anyhow::Result<String> {
    // Clones the process-wide cached string built lazily by
    // `github_organizations()`.
    Ok(github_organizations().clone())
  }
}
|
||||
|
||||
fn docker_organizations() -> &'static String {
|
||||
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
|
||||
DOCKER_ORGANIZATIONS.get_or_init(|| {
|
||||
@@ -249,7 +272,6 @@ fn docker_organizations() -> &'static String {
|
||||
})
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ResolveToString<ListDockerOrganizations, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
@@ -259,3 +281,28 @@ impl ResolveToString<ListDockerOrganizations, User> for State {
|
||||
Ok(docker_organizations().clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListCommonBuildExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListCommonBuildExtraArgs { query }: ListCommonBuildExtraArgs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListCommonBuildExtraArgsResponse> {
|
||||
let builds = resource::list_full_for_user::<Build>(query, &user)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
|
||||
// first collect with guaranteed uniqueness
|
||||
let mut res = HashSet::<String>::new();
|
||||
|
||||
for build in builds {
|
||||
for extra_arg in build.config.extra_args {
|
||||
res.insert(extra_arg);
|
||||
}
|
||||
}
|
||||
|
||||
let mut res = res.into_iter().collect::<Vec<_>>();
|
||||
res.sort();
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::{collections::HashSet, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{self, *},
|
||||
entities::{
|
||||
@@ -21,7 +20,6 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetBuilder, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -37,7 +35,6 @@ impl Resolve<GetBuilder, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListBuilders, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -48,14 +45,23 @@ impl Resolve<ListBuilders, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullBuilders, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullBuilders { query }: ListFullBuilders,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullBuildersResponse> {
|
||||
resource::list_full_for_user::<Builder>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetBuildersSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuildersSummary {}: GetBuildersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetBuildersSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
@@ -84,7 +90,6 @@ impl Resolve<GetBuildersSummary, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetBuilderAvailableAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -105,7 +110,7 @@ impl Resolve<GetBuilderAvailableAccounts, User> for State {
|
||||
let res = self
|
||||
.resolve(
|
||||
read::GetAvailableAccounts {
|
||||
server: config.server_id,
|
||||
server: Some(config.server_id),
|
||||
},
|
||||
user,
|
||||
)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::{cmp, collections::HashSet, str::FromStr};
|
||||
use std::{cmp, collections::HashSet};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -11,26 +10,19 @@ use monitor_client::{
|
||||
},
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, ResourceTargetVariant},
|
||||
update::Log,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client, query::get_resource_ids_for_non_admin,
|
||||
},
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
state::{action_states, db_client, deployment_status_cache, State},
|
||||
state::{action_states, deployment_status_cache, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDeployment, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -46,7 +38,6 @@ impl Resolve<GetDeployment, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListDeployments, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -57,7 +48,16 @@ impl Resolve<ListDeployments, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullDeployments, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullDeployments { query }: ListFullDeployments,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullDeploymentsResponse> {
|
||||
resource::list_full_for_user::<Deployment>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDeploymentContainer, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -84,7 +84,6 @@ impl Resolve<GetDeploymentContainer, User> for State {
|
||||
|
||||
const MAX_LOG_LENGTH: u64 = 5000;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -115,7 +114,6 @@ impl Resolve<GetLog, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<SearchLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -151,7 +149,6 @@ impl Resolve<SearchLog, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDeploymentStats, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -179,7 +176,6 @@ impl Resolve<GetDeploymentStats, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDeploymentActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -202,34 +198,18 @@ impl Resolve<GetDeploymentActionState, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDeploymentsSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDeploymentsSummary {}: GetDeploymentsSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetDeploymentsSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Deployment,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
|
||||
let deployments =
|
||||
find_collect(&db_client().await.deployments, query, None)
|
||||
.await
|
||||
.context("failed to count all deployment documents")?;
|
||||
let deployments = resource::list_full_for_user::<Deployment>(
|
||||
Default::default(),
|
||||
&user,
|
||||
)
|
||||
.await
|
||||
.context("failed to get deployments from db")?;
|
||||
let mut res = GetDeploymentsSummaryResponse::default();
|
||||
let status_cache = deployment_status_cache();
|
||||
for deployment in deployments {
|
||||
@@ -255,13 +235,12 @@ impl Resolve<GetDeploymentsSummary, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListCommonExtraArgs, User> for State {
|
||||
impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListCommonExtraArgs { query }: ListCommonExtraArgs,
|
||||
ListCommonDeploymentExtraArgs { query }: ListCommonDeploymentExtraArgs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListCommonExtraArgsResponse> {
|
||||
) -> anyhow::Result<ListCommonDeploymentExtraArgsResponse> {
|
||||
let deployments =
|
||||
resource::list_full_for_user::<Deployment>(query, &user)
|
||||
.await
|
||||
@@ -276,6 +255,8 @@ impl Resolve<ListCommonExtraArgs, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res.into_iter().collect())
|
||||
let mut res = res.into_iter().collect::<Vec<_>>();
|
||||
res.sort();
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::read::*, entities::user::User};
|
||||
use resolver_api::{
|
||||
derive::Resolver, Resolve, ResolveToString, Resolver,
|
||||
};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
@@ -26,11 +23,13 @@ mod repo;
|
||||
mod search;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod toml;
|
||||
mod update;
|
||||
mod user;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
@@ -62,16 +61,19 @@ enum ReadRequest {
|
||||
GetProcedure(GetProcedure),
|
||||
GetProcedureActionState(GetProcedureActionState),
|
||||
ListProcedures(ListProcedures),
|
||||
ListFullProcedures(ListFullProcedures),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
GetServerTemplate(GetServerTemplate),
|
||||
ListServerTemplates(ListServerTemplates),
|
||||
ListFullServerTemplates(ListFullServerTemplates),
|
||||
GetServerTemplatesSummary(GetServerTemplatesSummary),
|
||||
|
||||
// ==== SERVER ====
|
||||
GetServersSummary(GetServersSummary),
|
||||
GetServer(GetServer),
|
||||
ListServers(ListServers),
|
||||
ListFullServers(ListFullServers),
|
||||
GetServerState(GetServerState),
|
||||
GetPeripheryVersion(GetPeripheryVersion),
|
||||
GetDockerContainers(GetDockerContainers),
|
||||
@@ -86,20 +88,25 @@ enum ReadRequest {
|
||||
GetDeploymentsSummary(GetDeploymentsSummary),
|
||||
GetDeployment(GetDeployment),
|
||||
ListDeployments(ListDeployments),
|
||||
ListFullDeployments(ListFullDeployments),
|
||||
GetDeploymentContainer(GetDeploymentContainer),
|
||||
GetDeploymentActionState(GetDeploymentActionState),
|
||||
GetDeploymentStats(GetDeploymentStats),
|
||||
GetLog(GetLog),
|
||||
SearchLog(SearchLog),
|
||||
ListCommonExtraArgs(ListCommonExtraArgs),
|
||||
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
|
||||
|
||||
// ==== BUILD ====
|
||||
GetBuildsSummary(GetBuildsSummary),
|
||||
GetBuild(GetBuild),
|
||||
ListBuilds(ListBuilds),
|
||||
ListFullBuilds(ListFullBuilds),
|
||||
GetBuildActionState(GetBuildActionState),
|
||||
GetBuildMonthlyStats(GetBuildMonthlyStats),
|
||||
GetBuildVersions(GetBuildVersions),
|
||||
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
|
||||
#[to_string_resolver]
|
||||
ListGithubOrganizations(ListGithubOrganizations),
|
||||
#[to_string_resolver]
|
||||
ListDockerOrganizations(ListDockerOrganizations),
|
||||
|
||||
@@ -107,18 +114,28 @@ enum ReadRequest {
|
||||
GetReposSummary(GetReposSummary),
|
||||
GetRepo(GetRepo),
|
||||
ListRepos(ListRepos),
|
||||
ListFullRepos(ListFullRepos),
|
||||
GetRepoActionState(GetRepoActionState),
|
||||
|
||||
// ==== SYNC ====
|
||||
GetResourceSyncsSummary(GetResourceSyncsSummary),
|
||||
GetResourceSync(GetResourceSync),
|
||||
ListResourceSyncs(ListResourceSyncs),
|
||||
ListFullResourceSyncs(ListFullResourceSyncs),
|
||||
GetResourceSyncActionState(GetResourceSyncActionState),
|
||||
|
||||
// ==== BUILDER ====
|
||||
GetBuildersSummary(GetBuildersSummary),
|
||||
GetBuilder(GetBuilder),
|
||||
ListBuilders(ListBuilders),
|
||||
ListFullBuilders(ListFullBuilders),
|
||||
GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),
|
||||
|
||||
// ==== ALERTER ====
|
||||
GetAlertersSummary(GetAlertersSummary),
|
||||
GetAlerter(GetAlerter),
|
||||
ListAlerters(ListAlerters),
|
||||
ListFullAlerters(ListFullAlerters),
|
||||
|
||||
// ==== TOML ====
|
||||
ExportAllResourcesToToml(ExportAllResourcesToToml),
|
||||
@@ -143,6 +160,10 @@ enum ReadRequest {
|
||||
GetSystemStats(GetSystemStats),
|
||||
#[to_string_resolver]
|
||||
GetSystemProcesses(GetSystemProcesses),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
GetVariable(GetVariable),
|
||||
ListVariables(ListVariables),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -180,7 +201,6 @@ async fn handler(
|
||||
Ok((TypedHeader(ContentType::json()), res?))
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetVersion, User> for State {
|
||||
#[instrument(name = "GetVersion", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
@@ -194,7 +214,6 @@ impl Resolve<GetVersion, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetCoreInfo, User> for State {
|
||||
#[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
@@ -210,6 +229,8 @@ impl Resolve<GetCoreInfo, User> for State {
|
||||
.github_webhook_base_url
|
||||
.clone()
|
||||
.unwrap_or_else(|| config.host.clone()),
|
||||
transparent_mode: config.transparent_mode,
|
||||
ui_write_disabled: config.ui_write_disabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
|
||||
@@ -16,7 +15,6 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListPermissions, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -36,7 +34,6 @@ impl Resolve<ListPermissions, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetPermissionLevel, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -51,7 +48,6 @@ impl Resolve<GetPermissionLevel, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListUserTargetPermissions, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
|
||||
@@ -1,29 +1,19 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetProcedure, GetProcedureActionState,
|
||||
GetProcedureActionStateResponse, GetProcedureResponse,
|
||||
GetProceduresSummary, GetProceduresSummaryResponse,
|
||||
ListProcedures, ListProceduresResponse,
|
||||
},
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, procedure::Procedure,
|
||||
update::ResourceTargetVariant, user::User,
|
||||
permission::PermissionLevel,
|
||||
procedure::{Procedure, ProcedureState},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
state::{action_states, procedure_state_cache, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetProcedure, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -39,7 +29,6 @@ impl Resolve<GetProcedure, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListProcedures, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -50,43 +39,61 @@ impl Resolve<ListProcedures, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullProcedures, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullProcedures { query }: ListFullProcedures,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullProceduresResponse> {
|
||||
resource::list_full_for_user::<Procedure>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetProceduresSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetProceduresSummary {}: GetProceduresSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetProceduresSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Procedure,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.procedures
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all procedure documents")?;
|
||||
let res = GetProceduresSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
let procedures = resource::list_full_for_user::<Procedure>(
|
||||
Default::default(),
|
||||
&user,
|
||||
)
|
||||
.await
|
||||
.context("failed to get procedures from db")?;
|
||||
|
||||
let mut res = GetProceduresSummaryResponse::default();
|
||||
|
||||
let cache = procedure_state_cache();
|
||||
let action_states = action_states();
|
||||
|
||||
for procedure in procedures {
|
||||
res.total += 1;
|
||||
|
||||
match (
|
||||
cache.get(&procedure.id).await.unwrap_or_default(),
|
||||
action_states
|
||||
.procedure
|
||||
.get(&procedure.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.running => {
|
||||
res.running += 1;
|
||||
}
|
||||
(ProcedureState::Ok, _) => res.ok += 1,
|
||||
(ProcedureState::Failed, _) => res.failed += 1,
|
||||
(ProcedureState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the running state, since that comes from action states
|
||||
(ProcedureState::Running, _) => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetProcedureActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
|
||||
@@ -1,26 +1,19 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
repo::{Repo, RepoActionState, RepoListItem},
|
||||
update::ResourceTargetVariant,
|
||||
repo::{Repo, RepoActionState, RepoListItem, RepoState},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
state::{action_states, repo_state_cache, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetRepo, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -36,7 +29,6 @@ impl Resolve<GetRepo, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListRepos, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -47,7 +39,16 @@ impl Resolve<ListRepos, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullRepos, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullRepos { query }: ListFullRepos,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullReposResponse> {
|
||||
resource::list_full_for_user::<Repo>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetRepoActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -70,38 +71,50 @@ impl Resolve<GetRepoActionState, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetReposSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetReposSummary {}: GetReposSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetReposSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Alerter,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.repos
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all build documents")?;
|
||||
let res = GetReposSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
let repos =
|
||||
resource::list_full_for_user::<Repo>(Default::default(), &user)
|
||||
.await
|
||||
.context("failed to get repos from db")?;
|
||||
|
||||
let mut res = GetReposSummaryResponse::default();
|
||||
|
||||
let cache = repo_state_cache();
|
||||
let action_states = action_states();
|
||||
|
||||
for repo in repos {
|
||||
res.total += 1;
|
||||
|
||||
match (
|
||||
cache.get(&repo.id).await.unwrap_or_default(),
|
||||
action_states
|
||||
.repo
|
||||
.get(&repo.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.cloning => {
|
||||
res.cloning += 1;
|
||||
}
|
||||
(_, action_states) if action_states.pulling => {
|
||||
res.pulling += 1;
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _) | (RepoState::Pulling, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{FindResources, FindResourcesResponse},
|
||||
entities::{
|
||||
@@ -19,7 +18,6 @@ const FIND_RESOURCE_TYPES: [ResourceTargetVariant; 5] = [
|
||||
ResourceTargetVariant::Procedure,
|
||||
];
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<FindResources, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
|
||||
@@ -7,7 +7,6 @@ use anyhow::{anyhow, Context};
|
||||
use async_timing_util::{
|
||||
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -35,7 +34,6 @@ use crate::{
|
||||
state::{action_states, db_client, server_status_cache, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetServersSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -64,7 +62,6 @@ impl Resolve<GetServersSummary, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetPeripheryVersion, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -86,7 +83,6 @@ impl Resolve<GetPeripheryVersion, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetServer, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -102,7 +98,6 @@ impl Resolve<GetServer, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListServers, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -113,7 +108,16 @@ impl Resolve<ListServers, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullServers, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullServers { query }: ListFullServers,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullServersResponse> {
|
||||
resource::list_full_for_user::<Server>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetServerState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -137,7 +141,6 @@ impl Resolve<GetServerState, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetServerActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -169,7 +172,6 @@ fn system_info_cache() -> &'static SystemInfoCache {
|
||||
SYSTEM_INFO_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ResolveToString<GetSystemInformation, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
@@ -205,7 +207,6 @@ impl ResolveToString<GetSystemInformation, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ResolveToString<GetSystemStats, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
@@ -239,7 +240,6 @@ fn processes_cache() -> &'static ProcessesCache {
|
||||
PROCESSES_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ResolveToString<GetSystemProcesses, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
@@ -276,7 +276,6 @@ impl ResolveToString<GetSystemProcesses, User> for State {
|
||||
|
||||
const STATS_PER_PAGE: i64 = 500;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetHistoricalServerStats, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -330,7 +329,6 @@ impl Resolve<GetHistoricalServerStats, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDockerImages, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -349,7 +347,6 @@ impl Resolve<GetDockerImages, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDockerNetworks, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -368,7 +365,6 @@ impl Resolve<GetDockerNetworks, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetDockerContainers, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -387,25 +383,30 @@ impl Resolve<GetDockerContainers, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetAvailableAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAvailableAccounts { server }: GetAvailableAccounts,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAvailableAccountsResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let (github, docker) = match server {
|
||||
Some(server) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let GetAccountsResponse { github, docker } =
|
||||
periphery_client(&server)?
|
||||
.request(api::GetAccounts {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
let GetAccountsResponse { github, docker } =
|
||||
periphery_client(&server)?
|
||||
.request(api::GetAccounts {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
(github, docker)
|
||||
}
|
||||
None => Default::default(),
|
||||
};
|
||||
|
||||
let mut github_set = HashSet::<String>::new();
|
||||
|
||||
@@ -428,7 +429,6 @@ impl Resolve<GetAvailableAccounts, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetAvailableSecrets, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -441,10 +441,11 @@ impl Resolve<GetAvailableSecrets, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let secrets = periphery_client(&server)?
|
||||
let mut secrets = periphery_client(&server)?
|
||||
.request(api::GetSecrets {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
secrets.sort();
|
||||
Ok(secrets)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,8 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetServerTemplate, GetServerTemplateResponse,
|
||||
GetServerTemplatesSummary, GetServerTemplatesSummaryResponse,
|
||||
ListServerTemplates, ListServerTemplatesResponse,
|
||||
},
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
update::ResourceTargetVariant, user::User,
|
||||
@@ -17,10 +12,11 @@ use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin, resource, state::{db_client, State}
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetServerTemplate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -36,7 +32,6 @@ impl Resolve<GetServerTemplate, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListServerTemplates, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -47,7 +42,16 @@ impl Resolve<ListServerTemplates, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListFullServerTemplates, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullServerTemplates { query }: ListFullServerTemplates,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullServerTemplatesResponse> {
|
||||
resource::list_full_for_user::<ServerTemplate>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetServerTemplatesSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -72,10 +76,10 @@ impl Resolve<GetServerTemplatesSummary, User> for State {
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.builders
|
||||
.server_templates
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all builder documents")?;
|
||||
.context("failed to count all server template documents")?;
|
||||
let res = GetServerTemplatesSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
|
||||
139
bin/core/src/api/read/sync.rs
Normal file
139
bin/core/src/api/read/sync.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
sync::{
|
||||
PendingSyncUpdatesData, ResourceSync, ResourceSyncActionState,
|
||||
ResourceSyncListItem, ResourceSyncState,
|
||||
},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
resource,
|
||||
state::{action_states, resource_sync_state_cache, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetResourceSync, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetResourceSync { sync }: GetResourceSync,
|
||||
user: User,
|
||||
) -> anyhow::Result<ResourceSync> {
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
&sync,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListResourceSyncs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListResourceSyncs { query }: ListResourceSyncs,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ResourceSyncListItem>> {
|
||||
resource::list_for_user::<ResourceSync>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListFullResourceSyncs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListFullResourceSyncs { query }: ListFullResourceSyncs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListFullResourceSyncsResponse> {
|
||||
resource::list_full_for_user::<ResourceSync>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetResourceSyncActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetResourceSyncActionState { sync }: GetResourceSyncActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<ResourceSyncActionState> {
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
&sync,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.get(&sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetResourceSyncsSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetResourceSyncsSummary {}: GetResourceSyncsSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetResourceSyncsSummaryResponse> {
|
||||
let resource_syncs =
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
Default::default(),
|
||||
&user,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resource_syncs from db")?;
|
||||
|
||||
let mut res = GetResourceSyncsSummaryResponse::default();
|
||||
|
||||
let cache = resource_sync_state_cache();
|
||||
let action_states = action_states();
|
||||
|
||||
for resource_sync in resource_syncs {
|
||||
res.total += 1;
|
||||
|
||||
match resource_sync.info.pending.data {
|
||||
PendingSyncUpdatesData::Ok(data) => {
|
||||
if !data.no_updates() {
|
||||
res.pending += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
PendingSyncUpdatesData::Err(_) => {
|
||||
res.failed += 1;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
match (
|
||||
cache.get(&resource_sync.id).await.unwrap_or_default(),
|
||||
action_states
|
||||
.resource_sync
|
||||
.get(&resource_sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.syncing => {
|
||||
res.syncing += 1;
|
||||
}
|
||||
(ResourceSyncState::Ok, _) => res.ok += 1,
|
||||
(ResourceSyncState::Failed, _) => res.failed += 1,
|
||||
(ResourceSyncState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(ResourceSyncState::Syncing, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
(ResourceSyncState::Pending, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
api::read::{GetTag, ListTags},
|
||||
entities::{tag::Tag, user::User},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -12,7 +12,6 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetTag, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -23,15 +22,18 @@ impl Resolve<GetTag, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListTags, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListTags { query }: ListTags,
|
||||
_: User,
|
||||
) -> anyhow::Result<Vec<Tag>> {
|
||||
find_collect(&db_client().await.tags, query, None)
|
||||
.await
|
||||
.context("failed to get tags from db")
|
||||
find_collect(
|
||||
&db_client().await.tags,
|
||||
query,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to get tags from db")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::async_trait;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
execute::Execution,
|
||||
@@ -19,9 +18,10 @@ use monitor_client::{
|
||||
permission::{PermissionLevel, UserTarget},
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
resource::Resource,
|
||||
resource::{Resource, ResourceQuery},
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
sync::ResourceSync,
|
||||
toml::{
|
||||
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
|
||||
},
|
||||
@@ -30,44 +30,53 @@ use monitor_client::{
|
||||
},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use partial_derive2::PartialDiff;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user_user_group_ids,
|
||||
resource,
|
||||
resource::{self, MonitorResource},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ExportAllResourcesToToml {}: ExportAllResourcesToToml,
|
||||
ExportAllResourcesToToml { tags }: ExportAllResourcesToToml,
|
||||
user: User,
|
||||
) -> anyhow::Result<ExportAllResourcesToTomlResponse> {
|
||||
let mut targets = Vec::<ResourceTarget>::new();
|
||||
|
||||
targets.extend(
|
||||
resource::list_for_user::<Alerter>(Default::default(), &user)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Alerter(resource.id)),
|
||||
resource::list_for_user::<Alerter>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Alerter(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Builder>(Default::default(), &user)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Builder(resource.id)),
|
||||
resource::list_for_user::<Builder>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Builder(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Server>(Default::default(), &user)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Server(resource.id)),
|
||||
resource::list_for_user::<Server>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Server(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Deployment>(
|
||||
Default::default(),
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
@@ -75,26 +84,35 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
.map(|resource| ResourceTarget::Deployment(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Build>(Default::default(), &user)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Build(resource.id)),
|
||||
resource::list_for_user::<Build>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Build(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Repo>(Default::default(), &user)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Repo(resource.id)),
|
||||
resource::list_for_user::<Repo>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Repo(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Procedure>(Default::default(), &user)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Procedure(resource.id)),
|
||||
resource::list_for_user::<Procedure>(
|
||||
ResourceQuery::builder().tags(tags.clone()).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Procedure(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
Default::default(),
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
&user,
|
||||
)
|
||||
.await?
|
||||
@@ -118,6 +136,7 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
ExportResourcesToToml {
|
||||
targets,
|
||||
user_groups,
|
||||
include_variables: true,
|
||||
},
|
||||
user,
|
||||
)
|
||||
@@ -125,13 +144,13 @@ impl Resolve<ExportAllResourcesToToml, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ExportResourcesToToml, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ExportResourcesToToml {
|
||||
targets,
|
||||
user_groups,
|
||||
include_variables,
|
||||
}: ExportResourcesToToml,
|
||||
user: User,
|
||||
) -> anyhow::Result<ExportResourcesToTomlResponse> {
|
||||
@@ -148,7 +167,20 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
res.alerters.push(convert_resource(alerter, &names.tags))
|
||||
res
|
||||
.alerters
|
||||
.push(convert_resource::<Alerter>(alerter, &names.tags))
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
res
|
||||
.resource_syncs
|
||||
.push(convert_resource::<ResourceSync>(sync, &names.tags))
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
let template = resource::get_check_permissions::<
|
||||
@@ -157,9 +189,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
res
|
||||
.server_templates
|
||||
.push(convert_resource(template, &names.tags))
|
||||
res.server_templates.push(
|
||||
convert_resource::<ServerTemplate>(template, &names.tags),
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
@@ -168,7 +200,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
res.servers.push(convert_resource(server, &names.tags))
|
||||
res
|
||||
.servers
|
||||
.push(convert_resource::<Server>(server, &names.tags))
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder =
|
||||
@@ -184,7 +218,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
names.servers.get(&id).unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
res.builders.push(convert_resource(builder, &names.tags))
|
||||
res
|
||||
.builders
|
||||
.push(convert_resource::<Builder>(builder, &names.tags))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
@@ -200,7 +236,9 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
.get(&build.config.builder_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
res.builds.push(convert_resource(build, &names.tags))
|
||||
res
|
||||
.builds
|
||||
.push(convert_resource::<Build>(build, &names.tags))
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
let mut deployment = resource::get_check_permissions::<
|
||||
@@ -224,9 +262,10 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
names.builds.get(build_id).unwrap_or(&String::new()),
|
||||
);
|
||||
}
|
||||
res
|
||||
.deployments
|
||||
.push(convert_resource(deployment, &names.tags))
|
||||
res.deployments.push(convert_resource::<Deployment>(
|
||||
deployment,
|
||||
&names.tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
@@ -242,7 +281,7 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
.get(&repo.config.server_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
res.repos.push(convert_resource(repo, &names.tags))
|
||||
res.repos.push(convert_resource::<Repo>(repo, &names.tags))
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
add_procedure(&id, &mut res, &user, &names)
|
||||
@@ -259,6 +298,13 @@ impl Resolve<ExportResourcesToToml, User> for State {
|
||||
.await
|
||||
.context("failed to add user groups")?;
|
||||
|
||||
if include_variables {
|
||||
res.variables =
|
||||
find_collect(&db_client().await.variables, None, None)
|
||||
.await
|
||||
.context("failed to get variables from db")?;
|
||||
}
|
||||
|
||||
let toml = toml::to_string(&res)
|
||||
.context("failed to serialize resources to toml")?;
|
||||
|
||||
@@ -278,67 +324,76 @@ async fn add_procedure(
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
for execution in &mut procedure.config.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::RunProcedure(exec) => exec.procedure.clone_from(
|
||||
names
|
||||
.procedures
|
||||
.get(&exec.procedure)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunBuild(exec) => exec.build.clone_from(
|
||||
names.builds.get(&exec.build).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::Deploy(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StartContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StopContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RemoveContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::CloneRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PullRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StopAllContainers(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneDockerNetworks(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneDockerImages(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneDockerContainers(exec) => {
|
||||
exec.server.clone_from(
|
||||
|
||||
for stage in &mut procedure.config.stages {
|
||||
for execution in &mut stage.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::RunProcedure(exec) => exec.procedure.clone_from(
|
||||
names
|
||||
.procedures
|
||||
.get(&exec.procedure)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunBuild(exec) => exec.build.clone_from(
|
||||
names.builds.get(&exec.build).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::Deploy(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StartContainer(exec) => {
|
||||
exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
Execution::StopContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RemoveContainer(exec) => {
|
||||
exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
Execution::CloneRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PullRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StopAllContainers(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
)
|
||||
),
|
||||
Execution::PruneNetworks(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneImages(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneContainers(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunSync(exec) => exec.sync.clone_from(
|
||||
names.syncs.get(&exec.sync).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::Sleep(_) | Execution::None(_) => {}
|
||||
}
|
||||
Execution::None(_) => continue,
|
||||
}
|
||||
}
|
||||
|
||||
res
|
||||
.procedures
|
||||
.push(convert_resource(procedure, &names.tags));
|
||||
.push(convert_resource::<Procedure>(procedure, &names.tags));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -350,6 +405,7 @@ struct ResourceNames {
|
||||
repos: HashMap<String, String>,
|
||||
deployments: HashMap<String, String>,
|
||||
procedures: HashMap<String, String>,
|
||||
syncs: HashMap<String, String>,
|
||||
}
|
||||
|
||||
impl ResourceNames {
|
||||
@@ -398,6 +454,12 @@ impl ResourceNames {
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
syncs: find_collect(&db.resource_syncs, None, None)
|
||||
.await
|
||||
.context("failed to get all resource syncs")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -450,13 +512,13 @@ async fn add_user_groups(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn convert_resource<Config, Info: Default, PartialConfig>(
|
||||
resource: Resource<Config, Info>,
|
||||
fn convert_resource<R: MonitorResource>(
|
||||
resource: Resource<R::Config, R::Info>,
|
||||
tag_names: &HashMap<String, String>,
|
||||
) -> ResourceToml<PartialConfig>
|
||||
where
|
||||
Config: Into<PartialConfig>,
|
||||
{
|
||||
) -> ResourceToml<R::PartialConfig> {
|
||||
// This makes sure all non-necessary (defaulted) fields don't make it into final toml
|
||||
let partial: R::PartialConfig = resource.config.into();
|
||||
let config = R::Config::default().minimize_partial(partial);
|
||||
ResourceToml {
|
||||
name: resource.name,
|
||||
tags: resource
|
||||
@@ -465,6 +527,6 @@ where
|
||||
.filter_map(|t| tag_names.get(t).cloned())
|
||||
.collect(),
|
||||
description: resource.description,
|
||||
config: resource.config.into(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
|
||||
entities::{
|
||||
@@ -14,6 +13,7 @@ use monitor_client::{
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
sync::ResourceSync,
|
||||
update::{
|
||||
ResourceTarget, ResourceTargetVariant, Update, UpdateListItem,
|
||||
},
|
||||
@@ -28,21 +28,21 @@ use mungos::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const UPDATES_PER_PAGE: i64 = 20;
|
||||
const UPDATES_PER_PAGE: i64 = 100;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListUpdates, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListUpdates { query, page }: ListUpdates,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListUpdatesResponse> {
|
||||
let query = if user.admin {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
query
|
||||
} else {
|
||||
let server_ids = get_resource_ids_for_non_admin(
|
||||
@@ -141,6 +141,7 @@ impl Resolve<ListUpdates, User> for State {
|
||||
target: u.target,
|
||||
status: u.status,
|
||||
version: u.version,
|
||||
other_data: u.other_data,
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
@@ -155,7 +156,6 @@ impl Resolve<ListUpdates, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetUpdate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -166,7 +166,7 @@ impl Resolve<GetUpdate, User> for State {
|
||||
.await
|
||||
.context("failed to query to db")?
|
||||
.context("no update exists with given id")?;
|
||||
if user.admin {
|
||||
if user.admin || core_config().transparent_mode {
|
||||
return Ok(update);
|
||||
}
|
||||
match &update.target {
|
||||
@@ -239,6 +239,14 @@ impl Resolve<GetUpdate, User> for State {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetUsername, GetUsernameResponse, ListApiKeys,
|
||||
@@ -9,13 +8,14 @@ use monitor_client::{
|
||||
entities::user::{User, UserConfig},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id, find::find_collect, mongodb::bson::doc,
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetUsername, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -27,13 +27,19 @@ impl Resolve<GetUsername, User> for State {
|
||||
.context("failed at mongo query for user")?
|
||||
.context("no user found with id")?;
|
||||
|
||||
let avatar = match user.config {
|
||||
UserConfig::Github { avatar, .. } => Some(avatar),
|
||||
UserConfig::Google { avatar, .. } => Some(avatar),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
Ok(GetUsernameResponse {
|
||||
username: user.username,
|
||||
avatar,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListUsers, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -43,16 +49,18 @@ impl Resolve<ListUsers, User> for State {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("this route is only accessable by admins"));
|
||||
}
|
||||
let mut users =
|
||||
find_collect(&db_client().await.users, None, None)
|
||||
.await
|
||||
.context("failed to pull users from db")?;
|
||||
let mut users = find_collect(
|
||||
&db_client().await.users,
|
||||
None,
|
||||
FindOptions::builder().sort(doc! { "username": 1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to pull users from db")?;
|
||||
users.iter_mut().for_each(|user| user.sanitize());
|
||||
Ok(users)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListApiKeys, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -62,7 +70,7 @@ impl Resolve<ListApiKeys, User> for State {
|
||||
let api_keys = find_collect(
|
||||
&db_client().await.api_keys,
|
||||
doc! { "user_id": &user.id },
|
||||
None,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for api keys")?
|
||||
@@ -76,7 +84,6 @@ impl Resolve<ListApiKeys, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListApiKeysForServiceUser, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetUserGroup, GetUserGroupResponse, ListUserGroups,
|
||||
@@ -11,13 +10,15 @@ use monitor_client::{
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId, Document},
|
||||
mongodb::{
|
||||
bson::{doc, oid::ObjectId, Document},
|
||||
options::FindOptions,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<GetUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -43,7 +44,6 @@ impl Resolve<GetUserGroup, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<ListUserGroups, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -54,8 +54,12 @@ impl Resolve<ListUserGroups, User> for State {
|
||||
if !user.admin {
|
||||
filter.insert("users", &user.id);
|
||||
}
|
||||
find_collect(&db_client().await.user_groups, filter, None)
|
||||
.await
|
||||
.context("failed to query db for UserGroups")
|
||||
find_collect(
|
||||
&db_client().await.user_groups,
|
||||
filter,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for UserGroups")
|
||||
}
|
||||
}
|
||||
|
||||
47
bin/core/src/api/read/variable.rs
Normal file
47
bin/core/src/api/read/variable.rs
Normal file
@@ -0,0 +1,47 @@
|
||||
use anyhow::Context;
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetVariable, GetVariableResponse, ListVariables,
|
||||
ListVariablesResponse,
|
||||
},
|
||||
entities::user::User,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_variable,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetVariable, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetVariable { name }: GetVariable,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetVariableResponse> {
|
||||
get_variable(&name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListVariables, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListVariables {}: ListVariables,
|
||||
_: User,
|
||||
) -> anyhow::Result<ListVariablesResponse> {
|
||||
let variables = find_collect(
|
||||
&db_client().await.variables,
|
||||
None,
|
||||
FindOptions::builder().sort(doc! { "name": 1 }).build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for variables")?;
|
||||
Ok(ListVariablesResponse {
|
||||
variables,
|
||||
secrets: core_config().secrets.keys().cloned().collect(),
|
||||
})
|
||||
}
|
||||
}
|
||||
225
bin/core/src/api/user.rs
Normal file
225
bin/core/src/api/user.rs
Normal file
@@ -0,0 +1,225 @@
|
||||
use std::{collections::VecDeque, time::Instant};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Json, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use mongo_indexed::doc;
|
||||
use monitor_client::{
|
||||
api::user::{
|
||||
CreateApiKey, CreateApiKeyResponse, DeleteApiKey,
|
||||
DeleteApiKeyResponse, PushRecentlyViewed,
|
||||
PushRecentlyViewedResponse, SetLastSeenUpdate,
|
||||
SetLastSeenUpdateResponse,
|
||||
},
|
||||
entities::{
|
||||
api_key::ApiKey, monitor_timestamp, update::ResourceTarget,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::{auth_request, random_string},
|
||||
helpers::query::get_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
#[resolver_target(State)]
|
||||
#[resolver_args(User)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
enum UserRequest {
|
||||
PushRecentlyViewed(PushRecentlyViewed),
|
||||
SetLastSeenUpdate(SetLastSeenUpdate),
|
||||
CreateApiKey(CreateApiKey),
|
||||
DeleteApiKey(DeleteApiKey),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
#[instrument(name = "UserHandler", level = "debug", skip(user))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<UserRequest>,
|
||||
) -> serror::Result<(TypedHeader<ContentType>, String)> {
|
||||
let timer = Instant::now();
|
||||
let req_id = Uuid::new_v4();
|
||||
debug!(
|
||||
"/user request {req_id} | user: {} ({})",
|
||||
user.username, user.id
|
||||
);
|
||||
let res =
|
||||
State
|
||||
.resolve_request(request, user)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
resolver_api::Error::Serialization(e) => {
|
||||
anyhow!("{e:?}").context("response serialization error")
|
||||
}
|
||||
resolver_api::Error::Inner(e) => e,
|
||||
});
|
||||
if let Err(e) = &res {
|
||||
warn!("/user request {req_id} error: {e:#}");
|
||||
}
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/user request {req_id} | resolve time: {elapsed:?}");
|
||||
Ok((TypedHeader(ContentType::json()), res?))
|
||||
}
|
||||
|
||||
const RECENTLY_VIEWED_MAX: usize = 10;
|
||||
|
||||
impl Resolve<PushRecentlyViewed, User> for State {
|
||||
#[instrument(
|
||||
name = "PushRecentlyViewed",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PushRecentlyViewed { resource }: PushRecentlyViewed,
|
||||
user: User,
|
||||
) -> anyhow::Result<PushRecentlyViewedResponse> {
|
||||
let user = get_user(&user.id).await?;
|
||||
|
||||
let (recents, id, field) = match resource {
|
||||
ResourceTarget::Server(id) => {
|
||||
(user.recent_servers, id, "recent_servers")
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
(user.recent_deployments, id, "recent_deployments")
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
(user.recent_builds, id, "recent_builds")
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
(user.recent_repos, id, "recent_repos")
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
(user.recent_procedures, id, "recent_procedures")
|
||||
}
|
||||
_ => return Ok(PushRecentlyViewedResponse {}),
|
||||
};
|
||||
|
||||
let mut recents = recents
|
||||
.into_iter()
|
||||
.filter(|_id| !id.eq(_id))
|
||||
.take(RECENTLY_VIEWED_MAX - 1)
|
||||
.collect::<VecDeque<_>>();
|
||||
recents.push_front(id);
|
||||
let update = doc! { field: to_bson(&recents)? };
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(update),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.with_context(|| format!("failed to update {field}"))?;
|
||||
|
||||
Ok(PushRecentlyViewedResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<SetLastSeenUpdate, User> for State {
|
||||
#[instrument(
|
||||
name = "SetLastSeenUpdate",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
SetLastSeenUpdate {}: SetLastSeenUpdate,
|
||||
user: User,
|
||||
) -> anyhow::Result<SetLastSeenUpdateResponse> {
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(doc! {
|
||||
"last_update_view": monitor_timestamp()
|
||||
}),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update user last_update_view")?;
|
||||
Ok(SetLastSeenUpdateResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
const SECRET_LENGTH: usize = 40;
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
impl Resolve<CreateApiKey, User> for State {
|
||||
#[instrument(
|
||||
name = "CreateApiKey",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateApiKey { name, expires }: CreateApiKey,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateApiKeyResponse> {
|
||||
let user = get_user(&user.id).await?;
|
||||
|
||||
let key = format!("K-{}", random_string(SECRET_LENGTH));
|
||||
let secret = format!("S-{}", random_string(SECRET_LENGTH));
|
||||
let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
|
||||
.context("failed at hashing secret string")?;
|
||||
|
||||
let api_key = ApiKey {
|
||||
name,
|
||||
key: key.clone(),
|
||||
secret: secret_hash,
|
||||
user_id: user.id.clone(),
|
||||
created_at: monitor_timestamp(),
|
||||
expires,
|
||||
};
|
||||
db_client()
|
||||
.await
|
||||
.api_keys
|
||||
.insert_one(api_key, None)
|
||||
.await
|
||||
.context("failed to create api key on db")?;
|
||||
Ok(CreateApiKeyResponse { key, secret })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteApiKey, User> for State {
|
||||
#[instrument(
|
||||
name = "DeleteApiKey",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteApiKey { key }: DeleteApiKey,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteApiKeyResponse> {
|
||||
let client = db_client().await;
|
||||
let key = client
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key }, None)
|
||||
.await
|
||||
.context("failed at db query")?
|
||||
.context("no api key with key found")?;
|
||||
if user.id != key.user_id {
|
||||
return Err(anyhow!("api key does not belong to user"));
|
||||
}
|
||||
client
|
||||
.api_keys
|
||||
.delete_one(doc! { "key": key.key }, None)
|
||||
.await
|
||||
.context("failed to delete api key from db")?;
|
||||
Ok(DeleteApiKeyResponse {})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,3 @@
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
|
||||
@@ -11,7 +10,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateAlerter, User> for State {
|
||||
#[instrument(name = "CreateAlerter", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -23,7 +21,6 @@ impl Resolve<CreateAlerter, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyAlerter, User> for State {
|
||||
#[instrument(name = "CopyAlerter", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -31,20 +28,16 @@ impl Resolve<CopyAlerter, User> for State {
|
||||
CopyAlerter { name, id }: CopyAlerter,
|
||||
user: User,
|
||||
) -> anyhow::Result<Alerter> {
|
||||
let Alerter {
|
||||
config,
|
||||
..
|
||||
} = resource::get_check_permissions::<Alerter>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
let Alerter { config, .. } = resource::get_check_permissions::<
|
||||
Alerter,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Write
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Alerter>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteAlerter, User> for State {
|
||||
#[instrument(name = "DeleteAlerter", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -56,7 +49,6 @@ impl Resolve<DeleteAlerter, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateAlerter, User> for State {
|
||||
#[instrument(name = "UpdateAlerter", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,152 +0,0 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
api_key::ApiKey,
|
||||
monitor_timestamp,
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::find_one_by_id, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
auth::random_string,
|
||||
helpers::query::get_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const SECRET_LENGTH: usize = 40;
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateApiKey, User> for State {
|
||||
#[instrument(
|
||||
name = "CreateApiKey",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateApiKey { name, expires }: CreateApiKey,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateApiKeyResponse> {
|
||||
let user = get_user(&user.id).await?;
|
||||
|
||||
let key = format!("K-{}", random_string(SECRET_LENGTH));
|
||||
let secret = format!("S-{}", random_string(SECRET_LENGTH));
|
||||
let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
|
||||
.context("failed at hashing secret string")?;
|
||||
|
||||
let api_key = ApiKey {
|
||||
name,
|
||||
key: key.clone(),
|
||||
secret: secret_hash,
|
||||
user_id: user.id.clone(),
|
||||
created_at: monitor_timestamp(),
|
||||
expires,
|
||||
};
|
||||
db_client()
|
||||
.await
|
||||
.api_keys
|
||||
.insert_one(api_key, None)
|
||||
.await
|
||||
.context("failed to create api key on db")?;
|
||||
Ok(CreateApiKeyResponse { key, secret })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteApiKey, User> for State {
|
||||
#[instrument(
|
||||
name = "DeleteApiKey",
|
||||
level = "debug",
|
||||
skip(self, user)
|
||||
)]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteApiKey { key }: DeleteApiKey,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteApiKeyResponse> {
|
||||
let client = db_client().await;
|
||||
let key = client
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key }, None)
|
||||
.await
|
||||
.context("failed at db query")?
|
||||
.context("no api key with key found")?;
|
||||
if user.id != key.user_id {
|
||||
return Err(anyhow!("api key does not belong to user"));
|
||||
}
|
||||
client
|
||||
.api_keys
|
||||
.delete_one(doc! { "key": key.key }, None)
|
||||
.await
|
||||
.context("failed to delete api key from db")?;
|
||||
Ok(DeleteApiKeyResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateApiKeyForServiceUser, User> for State {
|
||||
#[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateApiKeyForServiceUser {
|
||||
user_id,
|
||||
name,
|
||||
expires,
|
||||
}: CreateApiKeyForServiceUser,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let UserConfig::Service { .. } = &service_user.config else {
|
||||
return Err(anyhow!("user is not service user"));
|
||||
};
|
||||
self
|
||||
.resolve(CreateApiKey { name, expires }, service_user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
|
||||
#[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let db = db_client().await;
|
||||
let api_key = db
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key }, None)
|
||||
.await
|
||||
.context("failed to query db for api key")?
|
||||
.context("did not find matching api key")?;
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &api_key.user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let UserConfig::Service { .. } = &service_user.config else {
|
||||
return Err(anyhow!("user is not service user"));
|
||||
};
|
||||
db.api_keys
|
||||
.delete_one(doc! { "key": key }, None)
|
||||
.await
|
||||
.context("failed to delete api key on db")?;
|
||||
Ok(DeleteApiKeyForServiceUserResponse {})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,3 @@
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{build::Build, permission::PermissionLevel, user::User},
|
||||
@@ -7,7 +6,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateBuild, User> for State {
|
||||
#[instrument(name = "CreateBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -19,7 +17,6 @@ impl Resolve<CreateBuild, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyBuild, User> for State {
|
||||
#[instrument(name = "CopyBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -27,20 +24,17 @@ impl Resolve<CopyBuild, User> for State {
|
||||
CopyBuild { name, id }: CopyBuild,
|
||||
user: User,
|
||||
) -> anyhow::Result<Build> {
|
||||
let Build {
|
||||
config,
|
||||
..
|
||||
} = resource::get_check_permissions::<Build>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
let Build { config, .. } =
|
||||
resource::get_check_permissions::<Build>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Build>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteBuild, User> for State {
|
||||
#[instrument(name = "DeleteBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -52,7 +46,6 @@ impl Resolve<DeleteBuild, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateBuild, User> for State {
|
||||
#[instrument(name = "UpdateBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -9,7 +8,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateBuilder, User> for State {
|
||||
#[instrument(name = "CreateBuilder", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -21,7 +19,6 @@ impl Resolve<CreateBuilder, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyBuilder, User> for State {
|
||||
#[instrument(name = "CopyBuilder", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -39,7 +36,6 @@ impl Resolve<CopyBuilder, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteBuilder, User> for State {
|
||||
#[instrument(name = "DeleteBuilder", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -51,7 +47,6 @@ impl Resolve<DeleteBuilder, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateBuilder, User> for State {
|
||||
#[instrument(name = "UpdateBuilder", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -27,7 +26,6 @@ use crate::{
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateDeployment, User> for State {
|
||||
#[instrument(name = "CreateDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -39,7 +37,6 @@ impl Resolve<CreateDeployment, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyDeployment, User> for State {
|
||||
#[instrument(name = "CopyDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -58,7 +55,6 @@ impl Resolve<CopyDeployment, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteDeployment, User> for State {
|
||||
#[instrument(name = "DeleteDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -70,7 +66,6 @@ impl Resolve<DeleteDeployment, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateDeployment, User> for State {
|
||||
#[instrument(name = "UpdateDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -82,7 +77,6 @@ impl Resolve<UpdateDeployment, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RenameDeployment, User> for State {
|
||||
#[instrument(name = "RenameDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,19 +1,17 @@
|
||||
use anyhow::anyhow;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{UpdateDescription, UpdateDescriptionResponse},
|
||||
entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
update::ResourceTarget, user::User,
|
||||
sync::ResourceSync, update::ResourceTarget, user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateDescription, User> for State {
|
||||
#[instrument(name = "UpdateDescription", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -94,6 +92,14 @@ impl Resolve<UpdateDescription, User> for State {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::update_description::<ResourceSync>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateDescriptionResponse {})
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use resolver_api::{derive::Resolver, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
@@ -13,7 +13,6 @@ use uuid::Uuid;
|
||||
use crate::{auth::auth_request, state::State};
|
||||
|
||||
mod alerter;
|
||||
mod api_key;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
@@ -23,27 +22,23 @@ mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod service_user;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod user;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
#[resolver_target(State)]
|
||||
#[resolver_args(User)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
enum WriteRequest {
|
||||
// ==== API KEY ====
|
||||
CreateApiKey(CreateApiKey),
|
||||
DeleteApiKey(DeleteApiKey),
|
||||
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
|
||||
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
|
||||
|
||||
// ==== USER ====
|
||||
PushRecentlyViewed(PushRecentlyViewed),
|
||||
SetLastSeenUpdate(SetLastSeenUpdate),
|
||||
pub enum WriteRequest {
|
||||
// ==== SERVICE USER ====
|
||||
CreateServiceUser(CreateServiceUser),
|
||||
UpdateServiceUserDescription(UpdateServiceUserDescription),
|
||||
CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
|
||||
DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),
|
||||
|
||||
// ==== USER GROUP ====
|
||||
CreateUserGroup(CreateUserGroup),
|
||||
@@ -111,11 +106,24 @@ enum WriteRequest {
|
||||
DeleteProcedure(DeleteProcedure),
|
||||
UpdateProcedure(UpdateProcedure),
|
||||
|
||||
// ==== SYNC ====
|
||||
CreateResourceSync(CreateResourceSync),
|
||||
CopyResourceSync(CopyResourceSync),
|
||||
DeleteResourceSync(DeleteResourceSync),
|
||||
UpdateResourceSync(UpdateResourceSync),
|
||||
RefreshResourceSyncPending(RefreshResourceSyncPending),
|
||||
|
||||
// ==== TAG ====
|
||||
CreateTag(CreateTag),
|
||||
DeleteTag(DeleteTag),
|
||||
RenameTag(RenameTag),
|
||||
UpdateTagsOnResource(UpdateTagsOnResource),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
CreateVariable(CreateVariable),
|
||||
UpdateVariableValue(UpdateVariableValue),
|
||||
UpdateVariableDescription(UpdateVariableDescription),
|
||||
DeleteVariable(DeleteVariable),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
|
||||
@@ -27,7 +26,6 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateUserBasePermissions, User> for State {
|
||||
#[instrument(name = "UpdateUserBasePermissions", skip(self, admin))]
|
||||
async fn resolve(
|
||||
@@ -75,7 +73,6 @@ impl Resolve<UpdateUserBasePermissions, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdatePermissionOnTarget, User> for State {
|
||||
#[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
|
||||
async fn resolve(
|
||||
@@ -307,5 +304,20 @@ async fn extract_resource_target_with_validation(
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ServerTemplate, id))
|
||||
}
|
||||
ResourceTarget::ResourceSync(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.resource_syncs
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for resource syncs")?
|
||||
.context("no matching resource sync found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ResourceSync, id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -9,7 +8,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateProcedure, User> for State {
|
||||
#[instrument(name = "CreateProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -21,7 +19,6 @@ impl Resolve<CreateProcedure, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyProcedure, User> for State {
|
||||
#[instrument(name = "CopyProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -40,7 +37,6 @@ impl Resolve<CopyProcedure, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateProcedure, User> for State {
|
||||
#[instrument(name = "UpdateProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -52,7 +48,6 @@ impl Resolve<UpdateProcedure, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteProcedure, User> for State {
|
||||
#[instrument(name = "DeleteProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{permission::PermissionLevel, repo::Repo, user::User},
|
||||
@@ -7,7 +6,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateRepo, User> for State {
|
||||
#[instrument(name = "CreateRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -19,7 +17,6 @@ impl Resolve<CreateRepo, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyRepo, User> for State {
|
||||
#[instrument(name = "CopyRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -38,7 +35,6 @@ impl Resolve<CopyRepo, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteRepo, User> for State {
|
||||
#[instrument(name = "DeleteRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -50,7 +46,6 @@ impl Resolve<DeleteRepo, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateRepo, User> for State {
|
||||
#[instrument(name = "UpdateRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -25,7 +24,6 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateServer, User> for State {
|
||||
#[instrument(name = "CreateServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -37,7 +35,6 @@ impl Resolve<CreateServer, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteServer, User> for State {
|
||||
#[instrument(name = "DeleteServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -49,7 +46,6 @@ impl Resolve<DeleteServer, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateServer, User> for State {
|
||||
#[instrument(name = "UpdateServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -61,7 +57,6 @@ impl Resolve<UpdateServer, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RenameServer, User> for State {
|
||||
#[instrument(name = "RenameServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -91,7 +86,6 @@ impl Resolve<RenameServer, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateNetwork, User> for State {
|
||||
#[instrument(name = "CreateNetwork", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -129,7 +123,6 @@ impl Resolve<CreateNetwork, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteNetwork, User> for State {
|
||||
#[instrument(name = "DeleteNetwork", skip(self, user))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use axum::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
|
||||
@@ -13,8 +12,8 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateServerTemplate, User> for State {
|
||||
#[instrument(name = "CreateServerTemplate", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateServerTemplate { name, config }: CreateServerTemplate,
|
||||
@@ -24,8 +23,8 @@ impl Resolve<CreateServerTemplate, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CopyServerTemplate, User> for State {
|
||||
#[instrument(name = "CopyServerTemplate", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyServerTemplate { name, id }: CopyServerTemplate,
|
||||
@@ -43,8 +42,8 @@ impl Resolve<CopyServerTemplate, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteServerTemplate, User> for State {
|
||||
#[instrument(name = "DeleteServerTemplate", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteServerTemplate { id }: DeleteServerTemplate,
|
||||
@@ -54,8 +53,8 @@ impl Resolve<DeleteServerTemplate, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateServerTemplate, User> for State {
|
||||
#[instrument(name = "UpdateServerTemplate", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateServerTemplate { id, config }: UpdateServerTemplate,
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
use std::{collections::VecDeque, str::FromStr};
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateServiceUser, CreateServiceUserResponse, PushRecentlyViewed,
|
||||
PushRecentlyViewedResponse, SetLastSeenUpdate,
|
||||
SetLastSeenUpdateResponse, UpdateServiceUserDescription,
|
||||
UpdateServiceUserDescriptionResponse,
|
||||
api::{
|
||||
user::CreateApiKey,
|
||||
write::{
|
||||
CreateApiKeyForServiceUser, CreateApiKeyForServiceUserResponse,
|
||||
CreateServiceUser, CreateServiceUserResponse,
|
||||
DeleteApiKeyForServiceUser, DeleteApiKeyForServiceUserResponse,
|
||||
UpdateServiceUserDescription,
|
||||
UpdateServiceUserDescriptionResponse,
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
@@ -15,77 +18,13 @@ use monitor_client::{
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId, to_bson},
|
||||
by_id::find_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
const RECENTLY_VIEWED_MAX: usize = 10;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<PushRecentlyViewed, User> for State {
|
||||
#[instrument(name = "PushRecentlyViewed", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PushRecentlyViewed { resource }: PushRecentlyViewed,
|
||||
user: User,
|
||||
) -> anyhow::Result<PushRecentlyViewedResponse> {
|
||||
let mut recently_viewed = get_user(&user.id)
|
||||
.await?
|
||||
.recently_viewed
|
||||
.into_iter()
|
||||
.filter(|r| !resource.eq(r))
|
||||
.take(RECENTLY_VIEWED_MAX - 1)
|
||||
.collect::<VecDeque<_>>();
|
||||
|
||||
recently_viewed.push_front(resource);
|
||||
|
||||
let recently_viewed = to_bson(&recently_viewed)
|
||||
.context("failed to convert recently views to bson")?;
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(doc! {
|
||||
"recently_viewed": recently_viewed
|
||||
}),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("context")?;
|
||||
|
||||
Ok(PushRecentlyViewedResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<SetLastSeenUpdate, User> for State {
|
||||
#[instrument(name = "SetLastSeenUpdate", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
SetLastSeenUpdate {}: SetLastSeenUpdate,
|
||||
user: User,
|
||||
) -> anyhow::Result<SetLastSeenUpdateResponse> {
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(doc! {
|
||||
"last_update_view": monitor_timestamp()
|
||||
}),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update user last_update_view")?;
|
||||
Ok(SetLastSeenUpdateResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateServiceUser, User> for State {
|
||||
#[instrument(name = "CreateServiceUser", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -112,7 +51,11 @@ impl Resolve<CreateServiceUser, User> for State {
|
||||
create_server_permissions: false,
|
||||
create_build_permissions: false,
|
||||
last_update_view: 0,
|
||||
recently_viewed: Vec::new(),
|
||||
recent_servers: Vec::new(),
|
||||
recent_deployments: Vec::new(),
|
||||
recent_builds: Vec::new(),
|
||||
recent_repos: Vec::new(),
|
||||
recent_procedures: Vec::new(),
|
||||
updated_at: monitor_timestamp(),
|
||||
};
|
||||
user.id = db_client()
|
||||
@@ -129,7 +72,6 @@ impl Resolve<CreateServiceUser, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateServiceUserDescription, User> for State {
|
||||
#[instrument(
|
||||
name = "UpdateServiceUserDescription",
|
||||
@@ -171,3 +113,64 @@ impl Resolve<UpdateServiceUserDescription, User> for State {
|
||||
.context("user with username not found")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateApiKeyForServiceUser, User> for State {
|
||||
#[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateApiKeyForServiceUser {
|
||||
user_id,
|
||||
name,
|
||||
expires,
|
||||
}: CreateApiKeyForServiceUser,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let UserConfig::Service { .. } = &service_user.config else {
|
||||
return Err(anyhow!("user is not service user"));
|
||||
};
|
||||
self
|
||||
.resolve(CreateApiKey { name, expires }, service_user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
|
||||
#[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let db = db_client().await;
|
||||
let api_key = db
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key }, None)
|
||||
.await
|
||||
.context("failed to query db for api key")?
|
||||
.context("did not find matching api key")?;
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &api_key.user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let UserConfig::Service { .. } = &service_user.config else {
|
||||
return Err(anyhow!("user is not service user"));
|
||||
};
|
||||
db.api_keys
|
||||
.delete_one(doc! { "key": key }, None)
|
||||
.await
|
||||
.context("failed to delete api key on db")?;
|
||||
Ok(DeleteApiKeyForServiceUserResponse {})
|
||||
}
|
||||
}
|
||||
321
bin/core/src/api/write/sync.rs
Normal file
321
bin/core/src/api/write/sync.rs
Normal file
@@ -0,0 +1,321 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
self,
|
||||
alert::{Alert, AlertData},
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::Deployment,
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::{stats::SeverityLevel, Server},
|
||||
server_template::ServerTemplate,
|
||||
sync::{
|
||||
PendingSyncUpdates, PendingSyncUpdatesData,
|
||||
PendingSyncUpdatesDataErr, PendingSyncUpdatesDataOk,
|
||||
ResourceSync,
|
||||
},
|
||||
update::ResourceTarget,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
query::get_id_to_tags,
|
||||
sync::resource::{get_updates_for_view, AllResourcesById},
|
||||
},
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
// Create a new ResourceSync with the given name and config.
// Thin wrapper: all validation / persistence is delegated to the
// generic resource::create helper.
impl Resolve<CreateResourceSync, User> for State {
  #[instrument(name = "CreateResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    CreateResourceSync { name, config }: CreateResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    resource::create::<ResourceSync>(&name, config, &user).await
  }
}
|
||||
|
||||
impl Resolve<CopyResourceSync, User> for State {
|
||||
#[instrument(name = "CopyResourceSync", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyResourceSync { name, id }: CopyResourceSync,
|
||||
user: User,
|
||||
) -> anyhow::Result<ResourceSync> {
|
||||
let ResourceSync { config, .. } =
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<ResourceSync>(&name, config.into(), &user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
// Delete a ResourceSync by id.
// Thin wrapper around the generic resource::delete helper, which
// performs the permission check for the calling user.
impl Resolve<DeleteResourceSync, User> for State {
  #[instrument(name = "DeleteResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    DeleteResourceSync { id }: DeleteResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    resource::delete::<ResourceSync>(&id, &user).await
  }
}
|
||||
|
||||
// Apply a partial config update to a ResourceSync.
// Thin wrapper around the generic resource::update helper.
impl Resolve<UpdateResourceSync, User> for State {
  #[instrument(name = "UpdateResourceSync", skip(self, user))]
  async fn resolve(
    &self,
    UpdateResourceSync { id, config }: UpdateResourceSync,
    user: User,
  ) -> anyhow::Result<ResourceSync> {
    resource::update::<ResourceSync>(&id, config, &user).await
  }
}
|
||||
|
||||
impl Resolve<RefreshResourceSyncPending, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
RefreshResourceSyncPending { sync }: RefreshResourceSyncPending,
|
||||
user: User,
|
||||
) -> anyhow::Result<ResourceSync> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// sync should be able to do this.
|
||||
let sync = resource::get_check_permissions::<
|
||||
entities::sync::ResourceSync,
|
||||
>(&sync, &user, PermissionLevel::Execute)
|
||||
.await?;
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(anyhow!("resource sync repo not configured"));
|
||||
}
|
||||
|
||||
let res = async {
|
||||
let (res, _, hash, message) =
|
||||
crate::helpers::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
let resources = res?;
|
||||
|
||||
let all_resources = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
|
||||
let data = PendingSyncUpdatesDataOk {
|
||||
server_updates: get_updates_for_view::<Server>(
|
||||
resources.servers,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get server updates")?,
|
||||
deployment_updates: get_updates_for_view::<Deployment>(
|
||||
resources.deployments,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get deployment updates")?,
|
||||
build_updates: get_updates_for_view::<Build>(
|
||||
resources.builds,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get build updates")?,
|
||||
repo_updates: get_updates_for_view::<Repo>(
|
||||
resources.repos,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get repo updates")?,
|
||||
procedure_updates: get_updates_for_view::<Procedure>(
|
||||
resources.procedures,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get procedure updates")?,
|
||||
alerter_updates: get_updates_for_view::<Alerter>(
|
||||
resources.alerters,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get alerter updates")?,
|
||||
builder_updates: get_updates_for_view::<Builder>(
|
||||
resources.builders,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get builder updates")?,
|
||||
server_template_updates:
|
||||
get_updates_for_view::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get server template updates")?,
|
||||
resource_sync_updates: get_updates_for_view::<
|
||||
entities::sync::ResourceSync,
|
||||
>(
|
||||
resources.resource_syncs,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
&id_to_tags,
|
||||
)
|
||||
.await
|
||||
.context("failed to get resource sync updates")?,
|
||||
variable_updates:
|
||||
crate::helpers::sync::variables::get_updates_for_view(
|
||||
resources.variables,
|
||||
sync.config.delete,
|
||||
)
|
||||
.await
|
||||
.context("failed to get variable updates")?,
|
||||
user_group_updates:
|
||||
crate::helpers::sync::user_groups::get_updates_for_view(
|
||||
resources.user_groups,
|
||||
sync.config.delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await
|
||||
.context("failed to get user group updates")?,
|
||||
};
|
||||
anyhow::Ok((hash, message, data))
|
||||
}
|
||||
.await;
|
||||
|
||||
let (pending, has_updates) = match res {
|
||||
Ok((hash, message, data)) => {
|
||||
let has_updates = !data.no_updates();
|
||||
(
|
||||
PendingSyncUpdates {
|
||||
hash: Some(hash),
|
||||
message: Some(message),
|
||||
data: PendingSyncUpdatesData::Ok(data),
|
||||
},
|
||||
has_updates,
|
||||
)
|
||||
}
|
||||
Err(e) => (
|
||||
PendingSyncUpdates {
|
||||
hash: None,
|
||||
message: None,
|
||||
data: PendingSyncUpdatesData::Err(
|
||||
PendingSyncUpdatesDataErr {
|
||||
message: serialize_error_pretty(&e),
|
||||
},
|
||||
),
|
||||
},
|
||||
false,
|
||||
),
|
||||
};
|
||||
|
||||
let pending = to_document(&pending)
|
||||
.context("failed to serialize pending to document")?;
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.resource_syncs,
|
||||
&sync.id,
|
||||
doc! { "$set": { "info.pending": pending } },
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// check to update alert
|
||||
let id = sync.id.clone();
|
||||
let name = sync.name.clone();
|
||||
tokio::task::spawn(async move {
|
||||
let db = db_client().await;
|
||||
let Some(existing) = db_client()
|
||||
.await
|
||||
.alerts
|
||||
.find_one(
|
||||
doc! {
|
||||
"resolved": false,
|
||||
"target.type": "ResourceSync",
|
||||
"target.id": &id,
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for alert")
|
||||
.inspect_err(|e| warn!("{e:#}"))
|
||||
.ok()
|
||||
else {
|
||||
return;
|
||||
};
|
||||
match (existing, has_updates) {
|
||||
// OPEN A NEW ALERT
|
||||
(None, true) => {
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts: monitor_timestamp(),
|
||||
resolved: false,
|
||||
level: SeverityLevel::Ok,
|
||||
target: ResourceTarget::ResourceSync(id.clone()),
|
||||
data: AlertData::ResourceSyncPendingUpdates { id, name },
|
||||
resolved_ts: None,
|
||||
};
|
||||
db.alerts
|
||||
.insert_one(&alert, None)
|
||||
.await
|
||||
.context("failed to open existing pending resource sync updates alert")
|
||||
.inspect_err(|e| warn!("{e:#}"))
|
||||
.ok();
|
||||
}
|
||||
// CLOSE ALERT
|
||||
(Some(existing), false) => {
|
||||
update_one_by_id(
|
||||
&db.alerts,
|
||||
&existing.id,
|
||||
doc! {
|
||||
"$set": {
|
||||
"resolved": true,
|
||||
"resolved_ts": monitor_timestamp()
|
||||
}
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to close existing pending resource sync updates alert")
|
||||
.inspect_err(|e| warn!("{e:#}"))
|
||||
.ok();
|
||||
}
|
||||
// NOTHING TO DO
|
||||
_ => {}
|
||||
}
|
||||
});
|
||||
|
||||
crate::resource::get::<ResourceSync>(&sync.id).await
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_trait::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
|
||||
@@ -11,7 +10,7 @@ use monitor_client::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
procedure::Procedure, repo::Repo, server::Server,
|
||||
server_template::ServerTemplate, tag::Tag,
|
||||
server_template::ServerTemplate, sync::ResourceSync, tag::Tag,
|
||||
update::ResourceTarget, user::User,
|
||||
},
|
||||
};
|
||||
@@ -27,7 +26,6 @@ use crate::{
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateTag, User> for State {
|
||||
#[instrument(name = "CreateTag", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -60,7 +58,6 @@ impl Resolve<CreateTag, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RenameTag, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -86,7 +83,6 @@ impl Resolve<RenameTag, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteTag, User> for State {
|
||||
#[instrument(name = "DeleteTag", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -113,7 +109,6 @@ impl Resolve<DeleteTag, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<UpdateTagsOnResource, User> for State {
|
||||
#[instrument(name = "UpdateTagsOnResource", skip(self, user))]
|
||||
async fn resolve(
|
||||
@@ -196,6 +191,15 @@ impl Resolve<UpdateTagsOnResource, User> for State {
|
||||
resource::update_tags::<ServerTemplate>(&id, tags, user)
|
||||
.await?
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<ResourceSync>(&id, tags, user).await?
|
||||
}
|
||||
};
|
||||
Ok(UpdateTagsOnResourceResponse {})
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::async_trait;
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
|
||||
@@ -18,7 +17,6 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -51,7 +49,6 @@ impl Resolve<CreateUserGroup, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RenameUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -77,7 +74,6 @@ impl Resolve<RenameUserGroup, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<DeleteUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -111,7 +107,6 @@ impl Resolve<DeleteUserGroup, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<AddUserToUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -155,7 +150,6 @@ impl Resolve<AddUserToUserGroup, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<RemoveUserFromUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
@@ -202,7 +196,6 @@ impl Resolve<RemoveUserFromUserGroup, User> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<SetUsersInUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
|
||||
169
bin/core/src/api/write/variable.rs
Normal file
169
bin/core/src/api/write/variable.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateVariable, CreateVariableResponse, DeleteVariable,
|
||||
DeleteVariableResponse, UpdateVariableDescription,
|
||||
UpdateVariableDescriptionResponse, UpdateVariableValue,
|
||||
UpdateVariableValueResponse,
|
||||
},
|
||||
entities::{
|
||||
update::ResourceTarget, user::User, variable::Variable, Operation,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
query::get_variable,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
// Create a global variable (name / value / description).
// Admin-only. Writes the variable, records an update-log entry,
// then returns the variable as re-read from the db.
impl Resolve<CreateVariable, User> for State {
  async fn resolve(
    &self,
    CreateVariable {
      name,
      value,
      description,
    }: CreateVariable,
    user: User,
  ) -> anyhow::Result<CreateVariableResponse> {
    if !user.admin {
      return Err(anyhow!("only admins can create variables"));
    }

    let variable = Variable {
      name,
      value,
      description,
    };

    db_client()
      .await
      .variables
      .insert_one(&variable, None)
      .await
      .context("failed to create variable on db")?;

    // Record the creation in the update log (system-level target,
    // since variables are not tied to a specific resource).
    let mut update = make_update(
      ResourceTarget::system(),
      Operation::CreateVariable,
      &user,
    );

    update
      .push_simple_log("create variable", format!("{variable:#?}"));
    update.finalize();

    add_update(update).await?;

    // Re-fetch so the response reflects exactly what was stored.
    get_variable(&variable.name).await
  }
}
|
||||
|
||||
impl Resolve<UpdateVariableValue, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableValue { name, value }: UpdateVariableValue,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableValueResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
}
|
||||
|
||||
let variable = get_variable(&name).await?;
|
||||
|
||||
if value == variable.value {
|
||||
return Err(anyhow!("no change"));
|
||||
}
|
||||
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
doc! { "$set": { "value": &value } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable value on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateVariableValue,
|
||||
&user,
|
||||
);
|
||||
|
||||
update.push_simple_log(
|
||||
"update variable value",
|
||||
format!(
|
||||
"<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
|
||||
variable.value
|
||||
),
|
||||
);
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
|
||||
get_variable(&name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateVariableDescription, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableDescription { name, description }: UpdateVariableDescription,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableDescriptionResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
}
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
doc! { "$set": { "description": &description } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable description on db")?;
|
||||
get_variable(&name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteVariable, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteVariable { name }: DeleteVariable,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
}
|
||||
let variable = get_variable(&name).await?;
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.delete_one(doc! { "name": &name }, None)
|
||||
.await
|
||||
.context("failed to delete variable on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::DeleteVariable,
|
||||
&user,
|
||||
);
|
||||
|
||||
update
|
||||
.push_simple_log("delete variable", format!("{variable:#?}"));
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
|
||||
Ok(variable)
|
||||
}
|
||||
}
|
||||
@@ -86,7 +86,11 @@ async fn callback(
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
last_update_view: 0,
|
||||
recently_viewed: Vec::new(),
|
||||
recent_servers: Vec::new(),
|
||||
recent_deployments: Vec::new(),
|
||||
recent_builds: Vec::new(),
|
||||
recent_repos: Vec::new(),
|
||||
recent_procedures: Vec::new(),
|
||||
config: UserConfig::Github {
|
||||
github_id,
|
||||
avatar: github_user.avatar_url,
|
||||
|
||||
@@ -2,7 +2,9 @@ use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use jwt::Token;
|
||||
use monitor_client::entities::config::core::{CoreConfig, OauthCredentials};
|
||||
use monitor_client::entities::config::core::{
|
||||
CoreConfig, OauthCredentials,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use serde::{de::DeserializeOwned, Deserialize};
|
||||
use serde_json::Value;
|
||||
|
||||
@@ -101,7 +101,11 @@ async fn callback(
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
last_update_view: 0,
|
||||
recently_viewed: Vec::new(),
|
||||
recent_servers: Vec::new(),
|
||||
recent_deployments: Vec::new(),
|
||||
recent_builds: Vec::new(),
|
||||
recent_repos: Vec::new(),
|
||||
recent_procedures: Vec::new(),
|
||||
config: UserConfig::Google {
|
||||
google_id,
|
||||
avatar: google_user.picture,
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use async_trait::async_trait;
|
||||
use axum::http::HeaderMap;
|
||||
use monitor_client::{
|
||||
api::auth::{
|
||||
@@ -22,7 +21,6 @@ use crate::{
|
||||
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<CreateLocalUser, HeaderMap> for State {
|
||||
#[instrument(name = "CreateLocalUser", skip(self))]
|
||||
async fn resolve(
|
||||
@@ -63,7 +61,11 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
last_update_view: 0,
|
||||
recently_viewed: Vec::new(),
|
||||
recent_servers: Vec::new(),
|
||||
recent_deployments: Vec::new(),
|
||||
recent_builds: Vec::new(),
|
||||
recent_repos: Vec::new(),
|
||||
recent_procedures: Vec::new(),
|
||||
config: UserConfig::Local { password },
|
||||
};
|
||||
|
||||
@@ -86,7 +88,6 @@ impl Resolve<CreateLocalUser, HeaderMap> for State {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Resolve<LoginLocalUser, HeaderMap> for State {
|
||||
#[instrument(name = "LoginLocalUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
|
||||
@@ -8,7 +8,9 @@ use axum::{
|
||||
use monitor_client::entities::{monitor_timestamp, user::User};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user,
|
||||
@@ -36,7 +38,9 @@ pub async fn auth_request(
|
||||
mut req: Request,
|
||||
next: Next,
|
||||
) -> serror::Result<Response> {
|
||||
let user = authenticate_check_enabled(&headers).await?;
|
||||
let user = authenticate_check_enabled(&headers)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
req.extensions_mut().insert(user);
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
@@ -12,11 +12,12 @@ use aws_sdk_ec2::{
|
||||
},
|
||||
Client,
|
||||
};
|
||||
use base64::Engine;
|
||||
use monitor_client::entities::{
|
||||
alert::{Alert, AlertData, AlertDataVariant},
|
||||
alert::{Alert, AlertData},
|
||||
monitor_timestamp,
|
||||
server::stats::SeverityLevel,
|
||||
server_template::AwsServerTemplateConfig,
|
||||
server_template::aws::AwsServerTemplateConfig,
|
||||
update::ResourceTarget,
|
||||
};
|
||||
|
||||
@@ -92,7 +93,10 @@ pub async fn launch_ec2_instance(
|
||||
)
|
||||
.min_count(1)
|
||||
.max_count(1)
|
||||
.user_data(user_data);
|
||||
.user_data(
|
||||
base64::engine::general_purpose::STANDARD_NO_PAD
|
||||
.encode(user_data),
|
||||
);
|
||||
|
||||
for volume in volumes {
|
||||
let ebs = EbsBlockDevice::builder()
|
||||
@@ -165,16 +169,16 @@ pub async fn terminate_ec2_instance_with_retry(
|
||||
}
|
||||
Err(e) => {
|
||||
if i == MAX_TERMINATION_TRIES - 1 {
|
||||
error!("failed to terminate instance {instance_id}.");
|
||||
error!("failed to terminate aws instance {instance_id}.");
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts: monitor_timestamp(),
|
||||
resolved: false,
|
||||
level: SeverityLevel::Critical,
|
||||
target: ResourceTarget::system(),
|
||||
variant: AlertDataVariant::AwsBuilderTerminationFailed,
|
||||
data: AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id: instance_id.to_string(),
|
||||
message: format!("{e:#}"),
|
||||
},
|
||||
resolved_ts: None,
|
||||
};
|
||||
@@ -191,7 +195,7 @@ pub async fn terminate_ec2_instance_with_retry(
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument(skip(client))]
|
||||
async fn terminate_ec2_instance_inner(
|
||||
client: &Client,
|
||||
instance_id: &str,
|
||||
|
||||
157
bin/core/src/cloud/hetzner/client.rs
Normal file
157
bin/core/src/cloud/hetzner/client.rs
Normal file
@@ -0,0 +1,157 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::http::{HeaderName, HeaderValue};
|
||||
use reqwest::{RequestBuilder, StatusCode};
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
|
||||
use super::{
|
||||
common::{
|
||||
HetznerActionResponse, HetznerDatacenterResponse,
|
||||
HetznerServerResponse, HetznerVolumeResponse,
|
||||
},
|
||||
create_server::{CreateServerBody, CreateServerResponse},
|
||||
create_volume::{CreateVolumeBody, CreateVolumeResponse},
|
||||
};
|
||||
|
||||
const BASE_URL: &str = "https://api.hetzner.cloud/v1";
|
||||
|
||||
pub struct HetznerClient(reqwest::Client);
|
||||
|
||||
impl HetznerClient {
|
||||
pub fn new(token: &str) -> HetznerClient {
|
||||
HetznerClient(
|
||||
reqwest::ClientBuilder::new()
|
||||
.default_headers(
|
||||
[(
|
||||
HeaderName::from_static("authorization"),
|
||||
HeaderValue::from_str(&format!("Bearer {token}"))
|
||||
.unwrap(),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
)
|
||||
.build()
|
||||
.context("failed to build Hetzner request client")
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn get_server(
|
||||
&self,
|
||||
id: i64,
|
||||
) -> anyhow::Result<HetznerServerResponse> {
|
||||
self.get(&format!("/servers/{id}")).await
|
||||
}
|
||||
|
||||
pub async fn create_server(
|
||||
&self,
|
||||
body: &CreateServerBody,
|
||||
) -> anyhow::Result<CreateServerResponse> {
|
||||
self.post("/servers", body).await
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn delete_server(
|
||||
&self,
|
||||
id: i64,
|
||||
) -> anyhow::Result<HetznerActionResponse> {
|
||||
self.delete(&format!("/servers/{id}")).await
|
||||
}
|
||||
|
||||
pub async fn get_volume(
|
||||
&self,
|
||||
id: i64,
|
||||
) -> anyhow::Result<HetznerVolumeResponse> {
|
||||
self.get(&format!("/volumes/{id}")).await
|
||||
}
|
||||
|
||||
pub async fn create_volume(
|
||||
&self,
|
||||
body: &CreateVolumeBody,
|
||||
) -> anyhow::Result<CreateVolumeResponse> {
|
||||
self.post("/volumes", body).await
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn delete_volume(&self, id: i64) -> anyhow::Result<()> {
|
||||
let res = self
|
||||
.0
|
||||
.delete(format!("{BASE_URL}/volumes/{id}"))
|
||||
.send()
|
||||
.await
|
||||
.context("failed at request to delete volume")?;
|
||||
|
||||
let status = res.status();
|
||||
|
||||
if status == StatusCode::NO_CONTENT {
|
||||
Ok(())
|
||||
} else {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response body as text")?;
|
||||
Err(anyhow!("{status} | {text}"))
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn list_datacenters(
|
||||
&self,
|
||||
) -> anyhow::Result<HetznerDatacenterResponse> {
|
||||
self.get("/datacenters").await
|
||||
}
|
||||
|
||||
async fn get<Res: DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
) -> anyhow::Result<Res> {
|
||||
let req = self.0.get(format!("{BASE_URL}{path}"));
|
||||
handle_req(req).await.with_context(|| {
|
||||
format!("failed at GET request to Hetzner | path: {path}")
|
||||
})
|
||||
}
|
||||
|
||||
async fn post<Body: Serialize, Res: DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
body: &Body,
|
||||
) -> anyhow::Result<Res> {
|
||||
let req = self.0.post(format!("{BASE_URL}{path}")).json(&body);
|
||||
handle_req(req).await.with_context(|| {
|
||||
format!("failed at POST request to Hetzner | path: {path}")
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
async fn delete<Res: DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
) -> anyhow::Result<Res> {
|
||||
let req = self.0.delete(format!("{BASE_URL}{path}"));
|
||||
handle_req(req).await.with_context(|| {
|
||||
format!("failed at DELETE request to Hetzner | path: {path}")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_req<Res: DeserializeOwned>(
|
||||
req: RequestBuilder,
|
||||
) -> anyhow::Result<Res> {
|
||||
let res = req.send().await?;
|
||||
|
||||
let status = res.status();
|
||||
|
||||
if status.is_success() {
|
||||
res.json().await.context("failed to parse response to json")
|
||||
} else {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response body as text")?;
|
||||
if let Ok(json_error) =
|
||||
serde_json::from_str::<serde_json::Value>(&text)
|
||||
{
|
||||
return Err(anyhow!("{status} | {json_error:?}"));
|
||||
}
|
||||
Err(anyhow!("{status} | {text}"))
|
||||
}
|
||||
}
|
||||
277
bin/core/src/cloud/hetzner/common.rs
Normal file
277
bin/core/src/cloud/hetzner/common.rs
Normal file
@@ -0,0 +1,277 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerServerResponse {
|
||||
pub server: HetznerServer,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerServer {
|
||||
pub id: i64,
|
||||
pub name: String,
|
||||
pub primary_disk_size: f64,
|
||||
pub image: Option<HetznerImage>,
|
||||
pub private_net: Vec<HetznerPrivateNet>,
|
||||
pub public_net: HetznerPublicNet,
|
||||
pub server_type: HetznerServerTypeDetails,
|
||||
pub status: HetznerServerStatus,
|
||||
#[serde(default)]
|
||||
pub volumes: Vec<i64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerServerTypeDetails {
|
||||
pub architecture: String,
|
||||
pub cores: i64,
|
||||
pub cpu_type: String,
|
||||
pub description: String,
|
||||
pub disk: f64,
|
||||
pub id: i64,
|
||||
pub memory: f64,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerPrivateNet {
|
||||
pub alias_ips: Vec<String>,
|
||||
pub ip: String,
|
||||
pub mac_address: String,
|
||||
pub network: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerPublicNet {
|
||||
#[serde(default)]
|
||||
pub firewalls: Vec<HetznerFirewall>,
|
||||
pub floating_ips: Vec<i64>,
|
||||
pub ipv4: Option<HetznerIpv4>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerFirewall {
|
||||
pub id: i64,
|
||||
pub status: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerIpv4 {
|
||||
pub id: Option<i64>,
|
||||
pub blocked: bool,
|
||||
pub dns_ptr: String,
|
||||
pub ip: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerImage {
|
||||
pub id: i64,
|
||||
pub description: String,
|
||||
pub name: Option<String>,
|
||||
pub os_flavor: String,
|
||||
pub os_version: Option<String>,
|
||||
pub rapid_deploy: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerActionResponse {
|
||||
pub action: HetznerAction,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerAction {
|
||||
pub command: String,
|
||||
pub error: Option<HetznerError>,
|
||||
pub finished: Option<String>,
|
||||
pub id: i64,
|
||||
pub progress: i32,
|
||||
pub resources: Vec<HetznerResource>,
|
||||
pub started: String,
|
||||
pub status: HetznerActionStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerError {
|
||||
pub code: String,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerResource {
|
||||
pub id: i64,
|
||||
#[serde(rename = "type")]
|
||||
pub ty: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerVolumeResponse {
|
||||
pub volume: HetznerVolume,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerVolume {
|
||||
/// Name of the Resource. Must be unique per Project.
|
||||
pub name: String,
|
||||
/// Point in time when the Resource was created (in ISO-8601 format).
|
||||
pub created: String,
|
||||
/// Filesystem of the Volume if formatted on creation, null if not formatted on creation
|
||||
pub format: Option<HetznerVolumeFormat>,
|
||||
/// ID of the Volume.
|
||||
pub id: i64,
|
||||
/// User-defined labels ( key/value pairs) for the Resource
|
||||
pub labels: HashMap<String, String>,
|
||||
/// Device path on the file system for the Volume
|
||||
pub linux_device: String,
|
||||
/// Protection configuration for the Resource.
|
||||
pub protection: HetznerProtection,
|
||||
/// ID of the Server the Volume is attached to, null if it is not attached at all
|
||||
pub server: Option<i64>,
|
||||
/// Size in GB of the Volume
|
||||
pub size: i64,
|
||||
/// Current status of the Volume. Allowed: `creating`, `available`
|
||||
pub status: HetznerVolumeStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerProtection {
|
||||
/// Prevent the Resource from being deleted.
|
||||
pub delete: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerDatacenterResponse {
|
||||
pub datacenters: Vec<HetznerDatacenterDetails>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerDatacenterDetails {
|
||||
pub id: i64,
|
||||
pub name: String,
|
||||
pub location: serde_json::Map<String, serde_json::Value>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum HetznerLocation {
|
||||
#[serde(rename = "nbg1")]
|
||||
Nuremberg1,
|
||||
#[serde(rename = "hel1")]
|
||||
Helsinki1,
|
||||
#[serde(rename = "fsn1")]
|
||||
Falkenstein1,
|
||||
#[serde(rename = "ash")]
|
||||
Ashburn,
|
||||
#[serde(rename = "hil")]
|
||||
Hillsboro,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum HetznerDatacenter {
|
||||
#[serde(rename = "nbg1-dc3")]
|
||||
Nuremberg1Dc3,
|
||||
#[serde(rename = "hel1-dc2")]
|
||||
Helsinki1Dc2,
|
||||
#[serde(rename = "fsn1-dc14")]
|
||||
Falkenstein1Dc14,
|
||||
#[serde(rename = "ash-dc1")]
|
||||
AshburnDc1,
|
||||
#[serde(rename = "hil-dc1")]
|
||||
HillsboroDc1,
|
||||
}
|
||||
|
||||
impl From<HetznerDatacenter> for HetznerLocation {
|
||||
fn from(value: HetznerDatacenter) -> Self {
|
||||
match value {
|
||||
HetznerDatacenter::Nuremberg1Dc3 => HetznerLocation::Nuremberg1,
|
||||
HetznerDatacenter::Helsinki1Dc2 => HetznerLocation::Helsinki1,
|
||||
HetznerDatacenter::Falkenstein1Dc14 => {
|
||||
HetznerLocation::Falkenstein1
|
||||
}
|
||||
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
|
||||
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerVolumeFormat {
|
||||
Xfs,
|
||||
Ext4,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerVolumeStatus {
|
||||
Creating,
|
||||
Available,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerServerStatus {
|
||||
Running,
|
||||
Initializing,
|
||||
Starting,
|
||||
Stopping,
|
||||
Off,
|
||||
Deleting,
|
||||
Migrating,
|
||||
Rebuilding,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerActionStatus {
|
||||
Running,
|
||||
Success,
|
||||
Error,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "UPPERCASE")]
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
pub enum HetznerServerType {
|
||||
// Shared
|
||||
#[serde(rename = "cx11")]
|
||||
SharedIntel1Core2Ram20Disk,
|
||||
#[serde(rename = "cpx11")]
|
||||
SharedAmd2Core2Ram40Disk,
|
||||
#[serde(rename = "cax11")]
|
||||
SharedArm2Core4Ram40Disk,
|
||||
#[serde(rename = "cx21")]
|
||||
SharedIntel2Core4Ram40Disk,
|
||||
#[serde(rename = "cpx21")]
|
||||
SharedAmd3Core4Ram80Disk,
|
||||
#[serde(rename = "cax21")]
|
||||
SharedArm4Core8Ram80Disk,
|
||||
#[serde(rename = "cx31")]
|
||||
SharedIntel2Core8Ram80Disk,
|
||||
#[serde(rename = "cpx31")]
|
||||
SharedAmd4Core8Ram160Disk,
|
||||
#[serde(rename = "cax31")]
|
||||
SharedArm8Core16Ram160Disk,
|
||||
#[serde(rename = "cx41")]
|
||||
SharedIntel4Core16Ram160Disk,
|
||||
#[serde(rename = "cpx41")]
|
||||
SharedAmd8Core16Ram240Disk,
|
||||
#[serde(rename = "cax41")]
|
||||
SharedArm16Core32Ram320Disk,
|
||||
#[serde(rename = "cx51")]
|
||||
SharedIntel8Core32Ram240Disk,
|
||||
#[serde(rename = "cpx51")]
|
||||
SharedAmd16Core32Ram360Disk,
|
||||
// Dedicated
|
||||
#[serde(rename = "ccx13")]
|
||||
DedicatedAmd2Core8Ram80Disk,
|
||||
#[serde(rename = "ccx23")]
|
||||
DedicatedAmd4Core16Ram160Disk,
|
||||
#[serde(rename = "ccx33")]
|
||||
DedicatedAmd8Core32Ram240Disk,
|
||||
#[serde(rename = "ccx43")]
|
||||
DedicatedAmd16Core64Ram360Disk,
|
||||
#[serde(rename = "ccx53")]
|
||||
DedicatedAmd32Core128Ram600Disk,
|
||||
#[serde(rename = "ccx63")]
|
||||
DedicatedAmd48Core192Ram960Disk,
|
||||
}
|
||||
76
bin/core/src/cloud/hetzner/create_server.rs
Normal file
76
bin/core/src/cloud/hetzner/create_server.rs
Normal file
@@ -0,0 +1,76 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::common::{
|
||||
HetznerAction, HetznerDatacenter, HetznerLocation, HetznerServer,
|
||||
HetznerServerType,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct CreateServerBody {
|
||||
/// Name of the Server to create (must be unique per Project and a valid hostname as per RFC 1123)
|
||||
pub name: String,
|
||||
/// Auto-mount Volumes after attach
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub automount: Option<bool>,
|
||||
/// ID or name of Datacenter to create Server in (must not be used together with location)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub datacenter: Option<HetznerDatacenter>,
|
||||
/// ID or name of Location to create Server in (must not be used together with datacenter)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub location: Option<HetznerLocation>,
|
||||
/// Firewalls which should be applied on the Server's public network interface at creation time
|
||||
pub firewalls: Vec<Firewall>,
|
||||
/// ID or name of the Image the Server is created from
|
||||
pub image: String,
|
||||
/// User-defined labels (key-value pairs) for the Resource
|
||||
pub labels: HashMap<String, String>,
|
||||
/// Network IDs which should be attached to the Server private network interface at the creation time
|
||||
pub networks: Vec<i64>,
|
||||
/// ID of the Placement Group the server should be in
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub placement_group: Option<i64>,
|
||||
/// Public Network options
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub public_net: Option<PublicNet>,
|
||||
/// ID or name of the Server type this Server should be created with
|
||||
pub server_type: HetznerServerType,
|
||||
/// SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time
|
||||
pub ssh_keys: Vec<String>,
|
||||
/// This automatically triggers a Power on a Server-Server Action after the creation is finished and is returned in the next_actions response object.
|
||||
pub start_after_create: bool,
|
||||
/// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub user_data: Option<String>,
|
||||
/// Volume IDs which should be attached to the Server at the creation time. Volumes must be in the same Location.
|
||||
pub volumes: Vec<i64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
pub struct Firewall {
|
||||
/// ID of the Firewall
|
||||
pub firewall: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
pub struct PublicNet {
|
||||
/// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
|
||||
pub enable_ipv4: bool,
|
||||
/// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
|
||||
pub enable_ipv6: bool,
|
||||
/// ID of the ipv4 Primary IP to use. If omitted and enable_ipv4 is true, a new ipv4 Primary IP will automatically be created.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ipv4: Option<i64>,
|
||||
/// ID of the ipv6 Primary IP to use. If omitted and enable_ipv6 is true, a new ipv6 Primary IP will automatically be created.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ipv6: Option<i64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct CreateServerResponse {
|
||||
pub action: HetznerAction,
|
||||
pub next_actions: Vec<HetznerAction>,
|
||||
pub root_password: Option<String>,
|
||||
pub server: HetznerServer,
|
||||
}
|
||||
36
bin/core/src/cloud/hetzner/create_volume.rs
Normal file
36
bin/core/src/cloud/hetzner/create_volume.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::common::{
|
||||
HetznerAction, HetznerLocation, HetznerVolume, HetznerVolumeFormat,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct CreateVolumeBody {
|
||||
/// Name of the volume
|
||||
pub name: String,
|
||||
/// Auto-mount Volume after attach. server must be provided.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub automount: Option<bool>,
|
||||
/// Format Volume after creation. One of: xfs, ext4
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub format: Option<HetznerVolumeFormat>,
|
||||
/// User-defined labels (key-value pairs) for the Resource
|
||||
pub labels: HashMap<String, String>,
|
||||
/// Location to create the Volume in (can be omitted if Server is specified)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub location: Option<HetznerLocation>,
|
||||
/// Server to which to attach the Volume once it's created (Volume will be created in the same Location as the server)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub server: Option<i64>,
|
||||
/// Size of the Volume in GB
|
||||
pub size: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct CreateVolumeResponse {
|
||||
pub action: HetznerAction,
|
||||
pub next_actions: Vec<HetznerAction>,
|
||||
pub volume: HetznerVolume,
|
||||
}
|
||||
282
bin/core/src/cloud/hetzner/mod.rs
Normal file
282
bin/core/src/cloud/hetzner/mod.rs
Normal file
@@ -0,0 +1,282 @@
|
||||
use std::{
|
||||
sync::{Arc, Mutex, OnceLock},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::entities::server_template::hetzner::{
|
||||
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
|
||||
HetznerVolumeFormat,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
cloud::hetzner::{
|
||||
common::HetznerServerStatus, create_server::CreateServerBody,
|
||||
create_volume::CreateVolumeBody,
|
||||
},
|
||||
config::core_config,
|
||||
};
|
||||
|
||||
use self::{client::HetznerClient, common::HetznerVolumeStatus};
|
||||
|
||||
mod client;
|
||||
mod common;
|
||||
mod create_server;
|
||||
mod create_volume;
|
||||
|
||||
fn hetzner() -> Option<&'static HetznerClient> {
|
||||
static HETZNER_CLIENT: OnceLock<Option<HetznerClient>> =
|
||||
OnceLock::new();
|
||||
HETZNER_CLIENT
|
||||
.get_or_init(|| {
|
||||
let token = &core_config().hetzner.token;
|
||||
(!token.is_empty()).then(|| HetznerClient::new(token))
|
||||
})
|
||||
.as_ref()
|
||||
}
|
||||
|
||||
pub struct HetznerServerMinimal {
|
||||
pub id: i64,
|
||||
pub ip: String,
|
||||
}
|
||||
|
||||
const POLL_RATE_SECS: u64 = 3;
|
||||
const MAX_POLL_TRIES: usize = 100;
|
||||
|
||||
#[instrument]
|
||||
pub async fn launch_hetzner_server(
|
||||
name: &str,
|
||||
config: HetznerServerTemplateConfig,
|
||||
) -> anyhow::Result<HetznerServerMinimal> {
|
||||
let hetzner =
|
||||
*hetzner().as_ref().context("Hetzner token not configured")?;
|
||||
let HetznerServerTemplateConfig {
|
||||
image,
|
||||
datacenter,
|
||||
private_network_ids,
|
||||
placement_group,
|
||||
enable_public_ipv4,
|
||||
enable_public_ipv6,
|
||||
firewall_ids,
|
||||
server_type,
|
||||
ssh_keys,
|
||||
user_data,
|
||||
use_public_ip,
|
||||
labels,
|
||||
volumes,
|
||||
port: _,
|
||||
} = config;
|
||||
let datacenter = hetzner_datacenter(datacenter);
|
||||
|
||||
// Create volumes and get their ids
|
||||
let mut volume_ids = Vec::new();
|
||||
for volume in volumes {
|
||||
let body = CreateVolumeBody {
|
||||
name: volume.name,
|
||||
format: Some(hetzner_format(volume.format)),
|
||||
location: Some(datacenter.into()),
|
||||
labels: volume.labels,
|
||||
size: volume.size_gb,
|
||||
automount: None,
|
||||
server: None,
|
||||
};
|
||||
let id = hetzner
|
||||
.create_volume(&body)
|
||||
.await
|
||||
.context("failed to create hetzner volume")?
|
||||
.volume
|
||||
.id;
|
||||
volume_ids.push(id);
|
||||
}
|
||||
|
||||
// Make sure volumes are available before continue
|
||||
let vol_ids_poll = Arc::new(Mutex::new(volume_ids.clone()));
|
||||
for _ in 0..MAX_POLL_TRIES {
|
||||
if vol_ids_poll.lock().unwrap().is_empty() {
|
||||
break;
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
|
||||
let ids = vol_ids_poll.lock().unwrap().clone();
|
||||
let futures = ids.into_iter().map(|id| {
|
||||
let vol_ids = vol_ids_poll.clone();
|
||||
async move {
|
||||
let Ok(res) = hetzner.get_volume(id).await else {
|
||||
return;
|
||||
};
|
||||
if matches!(res.volume.status, HetznerVolumeStatus::Available)
|
||||
{
|
||||
vol_ids.lock().unwrap().retain(|_id| *_id != id);
|
||||
}
|
||||
}
|
||||
});
|
||||
join_all(futures).await;
|
||||
}
|
||||
if !vol_ids_poll.lock().unwrap().is_empty() {
|
||||
return Err(anyhow!("Volumes not ready after poll"));
|
||||
}
|
||||
|
||||
let body = CreateServerBody {
|
||||
name: name.to_string(),
|
||||
automount: None,
|
||||
datacenter: Some(datacenter),
|
||||
location: None,
|
||||
firewalls: firewall_ids
|
||||
.into_iter()
|
||||
.map(|firewall| create_server::Firewall { firewall })
|
||||
.collect(),
|
||||
image,
|
||||
labels,
|
||||
networks: private_network_ids,
|
||||
placement_group: (placement_group > 0).then_some(placement_group),
|
||||
public_net: (enable_public_ipv4 || enable_public_ipv6).then_some(
|
||||
create_server::PublicNet {
|
||||
enable_ipv4: enable_public_ipv4,
|
||||
enable_ipv6: enable_public_ipv6,
|
||||
ipv4: None,
|
||||
ipv6: None,
|
||||
},
|
||||
),
|
||||
server_type: hetzner_server_type(server_type),
|
||||
ssh_keys,
|
||||
start_after_create: true,
|
||||
user_data: (!user_data.is_empty()).then_some(user_data),
|
||||
volumes: volume_ids,
|
||||
};
|
||||
|
||||
let server_id = hetzner
|
||||
.create_server(&body)
|
||||
.await
|
||||
.context("failed to create hetnzer server")?
|
||||
.server
|
||||
.id;
|
||||
|
||||
for _ in 0..MAX_POLL_TRIES {
|
||||
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
|
||||
let Ok(res) = hetzner.get_server(server_id).await else {
|
||||
continue;
|
||||
};
|
||||
if matches!(res.server.status, HetznerServerStatus::Running) {
|
||||
let ip = if use_public_ip {
|
||||
res
|
||||
.server
|
||||
.public_net
|
||||
.ipv4
|
||||
.context("instance does not have public ipv4 attached")?
|
||||
.ip
|
||||
} else {
|
||||
res
|
||||
.server
|
||||
.private_net
|
||||
.first()
|
||||
.context("no private networks attached")?
|
||||
.ip
|
||||
.to_string()
|
||||
};
|
||||
let server = HetznerServerMinimal { id: server_id, ip };
|
||||
return Ok(server);
|
||||
}
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
"failed to verify server running after polling status"
|
||||
))
|
||||
}
|
||||
|
||||
fn hetzner_format(
|
||||
format: HetznerVolumeFormat,
|
||||
) -> common::HetznerVolumeFormat {
|
||||
match format {
|
||||
HetznerVolumeFormat::Xfs => common::HetznerVolumeFormat::Xfs,
|
||||
HetznerVolumeFormat::Ext4 => common::HetznerVolumeFormat::Ext4,
|
||||
}
|
||||
}
|
||||
|
||||
fn hetzner_datacenter(
|
||||
datacenter: HetznerDatacenter,
|
||||
) -> common::HetznerDatacenter {
|
||||
match datacenter {
|
||||
HetznerDatacenter::Nuremberg1Dc3 => {
|
||||
common::HetznerDatacenter::Nuremberg1Dc3
|
||||
}
|
||||
HetznerDatacenter::Helsinki1Dc2 => {
|
||||
common::HetznerDatacenter::Helsinki1Dc2
|
||||
}
|
||||
HetznerDatacenter::Falkenstein1Dc14 => {
|
||||
common::HetznerDatacenter::Falkenstein1Dc14
|
||||
}
|
||||
HetznerDatacenter::AshburnDc1 => {
|
||||
common::HetznerDatacenter::AshburnDc1
|
||||
}
|
||||
HetznerDatacenter::HillsboroDc1 => {
|
||||
common::HetznerDatacenter::HillsboroDc1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn hetzner_server_type(
|
||||
server_type: HetznerServerType,
|
||||
) -> common::HetznerServerType {
|
||||
match server_type {
|
||||
HetznerServerType::SharedIntel1Core2Ram20Disk => {
|
||||
common::HetznerServerType::SharedIntel1Core2Ram20Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd2Core2Ram40Disk => {
|
||||
common::HetznerServerType::SharedAmd2Core2Ram40Disk
|
||||
}
|
||||
HetznerServerType::SharedArm2Core4Ram40Disk => {
|
||||
common::HetznerServerType::SharedArm2Core4Ram40Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel2Core4Ram40Disk => {
|
||||
common::HetznerServerType::SharedIntel2Core4Ram40Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd3Core4Ram80Disk => {
|
||||
common::HetznerServerType::SharedAmd3Core4Ram80Disk
|
||||
}
|
||||
HetznerServerType::SharedArm4Core8Ram80Disk => {
|
||||
common::HetznerServerType::SharedArm4Core8Ram80Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel2Core8Ram80Disk => {
|
||||
common::HetznerServerType::SharedIntel2Core8Ram80Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd4Core8Ram160Disk => {
|
||||
common::HetznerServerType::SharedAmd4Core8Ram160Disk
|
||||
}
|
||||
HetznerServerType::SharedArm8Core16Ram160Disk => {
|
||||
common::HetznerServerType::SharedArm8Core16Ram160Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel4Core16Ram160Disk => {
|
||||
common::HetznerServerType::SharedIntel4Core16Ram160Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd8Core16Ram240Disk => {
|
||||
common::HetznerServerType::SharedAmd8Core16Ram240Disk
|
||||
}
|
||||
HetznerServerType::SharedArm16Core32Ram320Disk => {
|
||||
common::HetznerServerType::SharedArm16Core32Ram320Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel8Core32Ram240Disk => {
|
||||
common::HetznerServerType::SharedIntel8Core32Ram240Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd16Core32Ram360Disk => {
|
||||
common::HetznerServerType::SharedAmd16Core32Ram360Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd2Core8Ram80Disk => {
|
||||
common::HetznerServerType::DedicatedAmd2Core8Ram80Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd4Core16Ram160Disk => {
|
||||
common::HetznerServerType::DedicatedAmd4Core16Ram160Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd8Core32Ram240Disk => {
|
||||
common::HetznerServerType::DedicatedAmd8Core32Ram240Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd16Core64Ram360Disk => {
|
||||
common::HetznerServerType::DedicatedAmd16Core64Ram360Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd32Core128Ram600Disk => {
|
||||
common::HetznerServerType::DedicatedAmd32Core128Ram600Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd48Core192Ram960Disk => {
|
||||
common::HetznerServerType::DedicatedAmd48Core192Ram960Disk
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,5 @@
|
||||
pub mod aws;
|
||||
pub mod hetzner;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum BuildCleanupData {
|
||||
|
||||
@@ -2,7 +2,13 @@ use std::sync::OnceLock;
|
||||
|
||||
use anyhow::Context;
|
||||
use merge_config_files::parse_config_file;
|
||||
use monitor_client::entities::config::core::{CoreConfig, Env};
|
||||
use monitor_client::entities::{
|
||||
config::core::{
|
||||
AwsCredentials, CoreConfig, Env, HetznerCredentials, MongoConfig,
|
||||
OauthCredentials,
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
|
||||
pub fn frontend_path() -> &'static String {
|
||||
@@ -34,91 +40,123 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.context("failed to parse core Env")
|
||||
.unwrap();
|
||||
let config_path = &env.monitor_config_path;
|
||||
let mut config =
|
||||
let config =
|
||||
parse_config_file::<CoreConfig>(config_path.as_str())
|
||||
.unwrap_or_else(|e| {
|
||||
panic!("failed at parsing config at {config_path} | {e:#}")
|
||||
});
|
||||
// recreating CoreConfig here makes sure we apply all env overrides.
|
||||
CoreConfig {
|
||||
title: env.monitor_title.unwrap_or(config.title),
|
||||
host: env.monitor_host.unwrap_or(config.host),
|
||||
port: env.monitor_port.unwrap_or(config.port),
|
||||
passkey: env.monitor_passkey.unwrap_or(config.passkey),
|
||||
jwt_valid_for: env
|
||||
.monitor_jwt_valid_for
|
||||
.unwrap_or(config.jwt_valid_for),
|
||||
sync_directory: env
|
||||
.monitor_sync_directory
|
||||
.map(|dir|
|
||||
dir.parse()
|
||||
.context("failed to parse env MONITOR_SYNC_DIRECTORY as valid path").unwrap())
|
||||
.unwrap_or(config.sync_directory),
|
||||
monitoring_interval: env
|
||||
.monitor_monitoring_interval
|
||||
.unwrap_or(config.monitoring_interval),
|
||||
keep_stats_for_days: env
|
||||
.monitor_keep_stats_for_days
|
||||
.unwrap_or(config.keep_stats_for_days),
|
||||
keep_alerts_for_days: env
|
||||
.monitor_keep_alerts_for_days
|
||||
.unwrap_or(config.keep_alerts_for_days),
|
||||
github_webhook_secret: env
|
||||
.monitor_github_webhook_secret
|
||||
.unwrap_or(config.github_webhook_secret),
|
||||
github_webhook_base_url: env
|
||||
.monitor_github_webhook_base_url
|
||||
.or(config.github_webhook_base_url),
|
||||
github_organizations: env.monitor_github_organizations
|
||||
.unwrap_or(config.github_organizations),
|
||||
docker_organizations: env
|
||||
.monitor_docker_organizations
|
||||
.unwrap_or(config.docker_organizations),
|
||||
transparent_mode: env
|
||||
.monitor_transparent_mode
|
||||
.unwrap_or(config.transparent_mode),
|
||||
ui_write_disabled: env
|
||||
.monitor_ui_write_disabled
|
||||
.unwrap_or(config.ui_write_disabled),
|
||||
local_auth: env.monitor_local_auth.unwrap_or(config.local_auth),
|
||||
google_oauth: OauthCredentials {
|
||||
enabled: env
|
||||
.monitor_google_oauth_enabled
|
||||
.unwrap_or(config.google_oauth.enabled),
|
||||
id: env
|
||||
.monitor_google_oauth_id
|
||||
.unwrap_or(config.google_oauth.id),
|
||||
secret: env
|
||||
.monitor_google_oauth_secret
|
||||
.unwrap_or(config.google_oauth.secret),
|
||||
},
|
||||
github_oauth: OauthCredentials {
|
||||
enabled: env
|
||||
.monitor_github_oauth_enabled
|
||||
.unwrap_or(config.github_oauth.enabled),
|
||||
id: env
|
||||
.monitor_github_oauth_id
|
||||
.unwrap_or(config.github_oauth.id),
|
||||
secret: env
|
||||
.monitor_github_oauth_secret
|
||||
.unwrap_or(config.github_oauth.secret),
|
||||
},
|
||||
aws: AwsCredentials {
|
||||
access_key_id: env
|
||||
.monitor_aws_access_key_id
|
||||
.unwrap_or(config.aws.access_key_id),
|
||||
secret_access_key: env
|
||||
.monitor_aws_secret_access_key
|
||||
.unwrap_or(config.aws.secret_access_key),
|
||||
},
|
||||
hetzner: HetznerCredentials {
|
||||
token: env
|
||||
.monitor_hetzner_token
|
||||
.unwrap_or(config.hetzner.token),
|
||||
},
|
||||
mongo: MongoConfig {
|
||||
uri: env.monitor_mongo_uri.or(config.mongo.uri),
|
||||
address: env.monitor_mongo_address.or(config.mongo.address),
|
||||
username: env
|
||||
.monitor_mongo_username
|
||||
.or(config.mongo.username),
|
||||
password: env
|
||||
.monitor_mongo_password
|
||||
.or(config.mongo.password),
|
||||
app_name: env
|
||||
.monitor_mongo_app_name
|
||||
.unwrap_or(config.mongo.app_name),
|
||||
db_name: env
|
||||
.monitor_mongo_db_name
|
||||
.unwrap_or(config.mongo.db_name),
|
||||
},
|
||||
logging: LogConfig {
|
||||
level: env
|
||||
.monitor_logging_level
|
||||
.unwrap_or(config.logging.level),
|
||||
stdio: env
|
||||
.monitor_logging_stdio
|
||||
.unwrap_or(config.logging.stdio),
|
||||
otlp_endpoint: env
|
||||
.monitor_logging_otlp_endpoint
|
||||
.or(config.logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.monitor_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.logging.opentelemetry_service_name),
|
||||
},
|
||||
|
||||
// Overrides
|
||||
config.title = env.monitor_title.unwrap_or(config.title);
|
||||
config.host = env.monitor_host.unwrap_or(config.host);
|
||||
config.port = env.monitor_port.unwrap_or(config.port);
|
||||
config.passkey = env.monitor_passkey.unwrap_or(config.passkey);
|
||||
config.jwt_valid_for =
|
||||
env.monitor_jwt_valid_for.unwrap_or(config.jwt_valid_for);
|
||||
config.monitoring_interval = env
|
||||
.monitor_monitoring_interval
|
||||
.unwrap_or(config.monitoring_interval);
|
||||
config.keep_stats_for_days = env
|
||||
.monitor_keep_stats_for_days
|
||||
.unwrap_or(config.keep_stats_for_days);
|
||||
config.keep_alerts_for_days = env
|
||||
.monitor_keep_alerts_for_days
|
||||
.unwrap_or(config.keep_alerts_for_days);
|
||||
config.github_webhook_secret = env
|
||||
.monitor_github_webhook_secret
|
||||
.unwrap_or(config.github_webhook_secret);
|
||||
config.github_webhook_base_url = env
|
||||
.monitor_github_webhook_base_url
|
||||
.or(config.github_webhook_base_url);
|
||||
config.docker_organizations = env
|
||||
.monitor_docker_organizations
|
||||
.unwrap_or(config.docker_organizations);
|
||||
|
||||
config.logging.level =
|
||||
env.monitor_logging_level.unwrap_or(config.logging.level);
|
||||
config.logging.stdio =
|
||||
env.monitor_logging_stdio.unwrap_or(config.logging.stdio);
|
||||
config.logging.otlp_endpoint = env
|
||||
.monitor_logging_otlp_endpoint
|
||||
.or(config.logging.otlp_endpoint);
|
||||
config.logging.opentelemetry_service_name = env
|
||||
.monitor_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.logging.opentelemetry_service_name);
|
||||
|
||||
config.local_auth =
|
||||
env.monitor_local_auth.unwrap_or(config.local_auth);
|
||||
|
||||
config.github_oauth.enabled = env
|
||||
.monitor_github_oauth_enabled
|
||||
.unwrap_or(config.github_oauth.enabled);
|
||||
config.github_oauth.id = env
|
||||
.monitor_github_oauth_id
|
||||
.unwrap_or(config.github_oauth.id);
|
||||
config.github_oauth.secret = env
|
||||
.monitor_github_oauth_secret
|
||||
.unwrap_or(config.github_oauth.secret);
|
||||
|
||||
config.google_oauth.enabled = env
|
||||
.monitor_google_oauth_enabled
|
||||
.unwrap_or(config.google_oauth.enabled);
|
||||
config.google_oauth.id = env
|
||||
.monitor_google_oauth_id
|
||||
.unwrap_or(config.google_oauth.id);
|
||||
config.google_oauth.secret = env
|
||||
.monitor_google_oauth_secret
|
||||
.unwrap_or(config.google_oauth.secret);
|
||||
|
||||
config.mongo.uri = env.monitor_mongo_uri.or(config.mongo.uri);
|
||||
config.mongo.address =
|
||||
env.monitor_mongo_address.or(config.mongo.address);
|
||||
config.mongo.username =
|
||||
env.monitor_mongo_username.or(config.mongo.username);
|
||||
config.mongo.password =
|
||||
env.monitor_mongo_password.or(config.mongo.password);
|
||||
config.mongo.app_name =
|
||||
env.monitor_mongo_app_name.unwrap_or(config.mongo.app_name);
|
||||
config.mongo.db_name =
|
||||
env.monitor_mongo_db_name.unwrap_or(config.mongo.db_name);
|
||||
|
||||
config.aws.access_key_id = env
|
||||
.monitor_aws_access_key_id
|
||||
.unwrap_or(config.aws.access_key_id);
|
||||
config.aws.secret_access_key = env
|
||||
.monitor_aws_secret_access_key
|
||||
.unwrap_or(config.aws.secret_access_key);
|
||||
|
||||
config
|
||||
// These can't be overridden on env
|
||||
secrets: config.secrets,
|
||||
github_accounts: config.github_accounts,
|
||||
docker_accounts: config.docker_accounts,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use mongo_indexed::{create_index, create_unique_index, Indexed};
|
||||
use mongo_indexed::{create_index, create_unique_index};
|
||||
use monitor_client::entities::{
|
||||
alert::Alert,
|
||||
alerter::Alerter,
|
||||
@@ -12,10 +12,12 @@ use monitor_client::entities::{
|
||||
repo::Repo,
|
||||
server::{stats::SystemStatsRecord, Server},
|
||||
server_template::ServerTemplate,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
update::Update,
|
||||
user::User,
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
};
|
||||
use mungos::{
|
||||
init::MongoBuilder,
|
||||
@@ -28,6 +30,7 @@ pub struct DbClient {
|
||||
pub permissions: Collection<Permission>,
|
||||
pub api_keys: Collection<ApiKey>,
|
||||
pub tags: Collection<Tag>,
|
||||
pub variables: Collection<Variable>,
|
||||
pub updates: Collection<Update>,
|
||||
pub alerts: Collection<Alert>,
|
||||
pub stats: Collection<SystemStatsRecord>,
|
||||
@@ -40,6 +43,7 @@ pub struct DbClient {
|
||||
pub procedures: Collection<Procedure>,
|
||||
pub alerters: Collection<Alerter>,
|
||||
pub server_templates: Collection<ServerTemplate>,
|
||||
pub resource_syncs: Collection<ResourceSync>,
|
||||
//
|
||||
pub db: Database,
|
||||
}
|
||||
@@ -80,14 +84,16 @@ impl DbClient {
|
||||
let db = client.database(db_name);
|
||||
|
||||
let client = DbClient {
|
||||
users: User::collection(&db, true).await?,
|
||||
user_groups: UserGroup::collection(&db, true).await?,
|
||||
permissions: Permission::collection(&db, true).await?,
|
||||
api_keys: ApiKey::collection(&db, true).await?,
|
||||
tags: Tag::collection(&db, true).await?,
|
||||
updates: Update::collection(&db, true).await?,
|
||||
alerts: Alert::collection(&db, true).await?,
|
||||
stats: SystemStatsRecord::collection(&db, true).await?,
|
||||
users: mongo_indexed::collection(&db, true).await?,
|
||||
user_groups: mongo_indexed::collection(&db, true).await?,
|
||||
permissions: mongo_indexed::collection(&db, true).await?,
|
||||
api_keys: mongo_indexed::collection(&db, true).await?,
|
||||
tags: mongo_indexed::collection(&db, true).await?,
|
||||
variables: mongo_indexed::collection(&db, true).await?,
|
||||
updates: mongo_indexed::collection(&db, true).await?,
|
||||
alerts: mongo_indexed::collection(&db, true).await?,
|
||||
stats: mongo_indexed::collection(&db, true).await?,
|
||||
// RESOURCES
|
||||
servers: resource_collection(&db, "Server").await?,
|
||||
deployments: resource_collection(&db, "Deployment").await?,
|
||||
builds: resource_collection(&db, "Build").await?,
|
||||
@@ -97,6 +103,9 @@ impl DbClient {
|
||||
procedures: resource_collection(&db, "Procedure").await?,
|
||||
server_templates: resource_collection(&db, "ServerTemplate")
|
||||
.await?,
|
||||
resource_syncs: resource_collection(&db, "ResourceSync")
|
||||
.await?,
|
||||
//
|
||||
db,
|
||||
};
|
||||
Ok(client)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user