forked from github-starred/komodo
Compare commits
171 Commits
v1.19.1-de
...
v2.0.0-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
059716f178 | ||
|
|
0bee1fe2c5 | ||
|
|
1e58c1a958 | ||
|
|
ed1431db0a | ||
|
|
dc769ff159 | ||
|
|
098f23ac4c | ||
|
|
03f577d22f | ||
|
|
95ca217362 | ||
|
|
6d61045764 | ||
|
|
34e075eaf3 | ||
|
|
232dc0bb4e | ||
|
|
0cc0ee2aab | ||
|
|
edebe925ff | ||
|
|
5fd45bbc7b | ||
|
|
0a490dadb2 | ||
|
|
23847c15bc | ||
|
|
0d238aee4f | ||
|
|
98ad6cf5fa | ||
|
|
e35b81630b | ||
|
|
1215852fe4 | ||
|
|
4164b76ff5 | ||
|
|
26a9daffeb | ||
|
|
8bb9f16e9b | ||
|
|
b6eaf76497 | ||
|
|
073893da0e | ||
|
|
e71547f1c2 | ||
|
|
1991627990 | ||
|
|
3434d827a3 | ||
|
|
1ef8b9878a | ||
|
|
07ddaa8377 | ||
|
|
142c08cde4 | ||
|
|
1aa1422faa | ||
|
|
1394e8a6b1 | ||
|
|
420ee10211 | ||
|
|
e918461dc5 | ||
|
|
4dc9ca27be | ||
|
|
f49b186f2f | ||
|
|
6e039b41f1 | ||
|
|
e7cd77b022 | ||
|
|
556cbd04c7 | ||
|
|
4e3d181466 | ||
|
|
5d4326f46f | ||
|
|
4bb486ad0a | ||
|
|
d29c5112d8 | ||
|
|
d41315b8a4 | ||
|
|
847404388c | ||
|
|
eef8ec59b8 | ||
|
|
9eb32f9ff5 | ||
|
|
859bfe67ef | ||
|
|
21ea469cd4 | ||
|
|
7fb902b892 | ||
|
|
c9c4ac47ee | ||
|
|
f228cd31f3 | ||
|
|
4feecb4b97 | ||
|
|
e2680d0942 | ||
|
|
7422c0730d | ||
|
|
37ac0dc7e3 | ||
|
|
dccaca1df4 | ||
|
|
886aea4c36 | ||
|
|
cbca070bae | ||
|
|
b4bdd401f6 | ||
|
|
e546166240 | ||
|
|
21689ce0ad | ||
|
|
941787db64 | ||
|
|
d4b1aacac3 | ||
|
|
30f89461bf | ||
|
|
a42d1397e9 | ||
|
|
b29313c28f | ||
|
|
08a246a90c | ||
|
|
1a08df28d0 | ||
|
|
a226ffc256 | ||
|
|
b385ee5ec3 | ||
|
|
c78c34357d | ||
|
|
4b7c692f00 | ||
|
|
1ac98a096e | ||
|
|
281a2dc1ce | ||
|
|
0fe91378a6 | ||
|
|
11e76d1cf2 | ||
|
|
a3bcd71105 | ||
|
|
3ecc56dd76 | ||
|
|
7239cbb19b | ||
|
|
a0540f7011 | ||
|
|
37aea7605e | ||
|
|
78be913541 | ||
|
|
c34f5ebf49 | ||
|
|
e5822cefb8 | ||
|
|
4baab194cf | ||
|
|
a896583da6 | ||
|
|
7b2674c38b | ||
|
|
d1e32989e3 | ||
|
|
e802bb3882 | ||
|
|
27a38b1bf5 | ||
|
|
2bc8a754be | ||
|
|
7a2a54bec1 | ||
|
|
6a15150d59 | ||
|
|
1b1dca76da | ||
|
|
a032f0f4ff | ||
|
|
2749d49435 | ||
|
|
d88e42ef2d | ||
|
|
a370e7d121 | ||
|
|
d139ad2b3d | ||
|
|
8d2d180398 | ||
|
|
37ca4ca986 | ||
|
|
33e73b8543 | ||
|
|
cf6e36e90c | ||
|
|
9eb8b32f4a | ||
|
|
b400add6f1 | ||
|
|
24adb89d25 | ||
|
|
4674b2badb | ||
|
|
65d1a69cb9 | ||
|
|
0da5718991 | ||
|
|
6b26cd120c | ||
|
|
28e1bb19a4 | ||
|
|
166107ac07 | ||
|
|
d77201880f | ||
|
|
1d7629e9b2 | ||
|
|
198f690ca5 | ||
|
|
531c79a144 | ||
|
|
d685862713 | ||
|
|
af0f245b5b | ||
|
|
cba36861b7 | ||
|
|
2c2c1d47b4 | ||
|
|
3a6b997241 | ||
|
|
7122f79b9d | ||
|
|
9bcee8122b | ||
|
|
a49c98946e | ||
|
|
7d222a7241 | ||
|
|
33501dac3e | ||
|
|
4675dfa736 | ||
|
|
0be51dc784 | ||
|
|
52453d1320 | ||
|
|
25da97ac1a | ||
|
|
02db5a11d3 | ||
|
|
89a5272246 | ||
|
|
ae51ea1ad6 | ||
|
|
3bdb4bea16 | ||
|
|
677bb14b5d | ||
|
|
6700700a80 | ||
|
|
996d4aa129 | ||
|
|
75894a7282 | ||
|
|
2a065edcf1 | ||
|
|
6f3703acfb | ||
|
|
59e989ecdf | ||
|
|
951ff34a9e | ||
|
|
2d83105500 | ||
|
|
3d455f5142 | ||
|
|
01de8c4a9b | ||
|
|
d5de338561 | ||
|
|
58c1afb8ef | ||
|
|
230f357b5a | ||
|
|
991c95fff0 | ||
|
|
f6243fe6b1 | ||
|
|
9feeccba6e | ||
|
|
673c7f3a6b | ||
|
|
39f900d651 | ||
|
|
8a06a0d6ce | ||
|
|
7789ee4f4a | ||
|
|
0472b6a7f7 | ||
|
|
d1d2227d36 | ||
|
|
cea7c5fc5e | ||
|
|
34a9f8eb9e | ||
|
|
494d01aeed | ||
|
|
084e2fec23 | ||
|
|
98d72fc908 | ||
|
|
20ac04fae5 | ||
|
|
a65fd4dca7 | ||
|
|
0873104b5a | ||
|
|
9a7b6ebd51 | ||
|
|
a4153fa28b | ||
|
|
e732da3b05 | ||
|
|
75ffbd559b |
10
.vscode/resolver.code-snippets
vendored
10
.vscode/resolver.code-snippets
vendored
@@ -3,8 +3,8 @@
|
||||
"scope": "rust",
|
||||
"prefix": "resolve",
|
||||
"body": [
|
||||
"impl Resolve<${1}, User> for State {",
|
||||
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
|
||||
"impl Resolve<${0}> for ${1} {",
|
||||
"\tasync fn resolve(self, _: &${0}) -> Result<Self::Response, Self::Error> {",
|
||||
"\t\ttodo!()",
|
||||
"\t}",
|
||||
"}"
|
||||
@@ -15,9 +15,9 @@
|
||||
"prefix": "static",
|
||||
"body": [
|
||||
"fn ${1}() -> &'static ${2} {",
|
||||
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${3}.get_or_init(|| {",
|
||||
"\t\t${0}",
|
||||
"\tstatic ${0}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${0}.get_or_init(|| {",
|
||||
"\t\ttodo!()",
|
||||
"\t})",
|
||||
"}"
|
||||
]
|
||||
|
||||
1633
Cargo.lock
generated
1633
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
89
Cargo.toml
89
Cargo.toml
@@ -8,13 +8,16 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.19.0"
|
||||
version = "2.0.0-dev-36"
|
||||
edition = "2024"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/moghtech/komodo"
|
||||
homepage = "https://komo.do"
|
||||
|
||||
[profile.release]
|
||||
strip = "debuginfo"
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
komodo_client = { path = "client/core/rs" }
|
||||
@@ -23,31 +26,33 @@ environment_file = { path = "lib/environment_file" }
|
||||
environment = { path = "lib/environment" }
|
||||
interpolate = { path = "lib/interpolate" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
transport = { path = "lib/transport" }
|
||||
database = { path = "lib/database" }
|
||||
response = { path = "lib/response" }
|
||||
command = { path = "lib/command" }
|
||||
config = { path = "lib/config" }
|
||||
logger = { path = "lib/logger" }
|
||||
cache = { path = "lib/cache" }
|
||||
noise = { path = "lib/noise" }
|
||||
git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.5.0", default-features = false }
|
||||
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
serror = { version = "0.5.3", default-features = false }
|
||||
slack = { version = "1.1.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
async_timing_util = "1.0.0"
|
||||
async_timing_util = "1.1.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "2.0.2"
|
||||
resolver_api = "3.0.0"
|
||||
toml_pretty = "1.2.0"
|
||||
mungos = "3.2.1"
|
||||
mungos = "3.2.2"
|
||||
svi = "1.2.0"
|
||||
|
||||
# ASYNC
|
||||
reqwest = { version = "0.12.22", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
reqwest = { version = "0.12.23", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.47.1", features = ["full"] }
|
||||
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
|
||||
tokio-stream = { version = "0.1.17", features = ["sync"] }
|
||||
@@ -57,79 +62,87 @@ futures-util = "0.3.31"
|
||||
arc-swap = "1.7.1"
|
||||
|
||||
# SERVER
|
||||
tokio-tungstenite = { version = "0.27.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.10.1", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.4", features = ["fs", "cors"] }
|
||||
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.10.3", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
|
||||
axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
|
||||
|
||||
# SER/DE
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
indexmap = { version = "2.10.0", features = ["serde"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
indexmap = { version = "2.11.4", features = ["serde"] }
|
||||
serde = { version = "1.0.227", features = ["derive"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
bson = { version = "2.15.0" } # must keep in sync with mongodb version
|
||||
serde_yaml_ng = "0.10.0"
|
||||
serde_json = "1.0.142"
|
||||
serde_json = "1.0.145"
|
||||
serde_qs = "0.15.0"
|
||||
toml = "0.9.5"
|
||||
toml = "0.9.7"
|
||||
url = "2.5.7"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.99"
|
||||
thiserror = "2.0.14"
|
||||
anyhow = "1.0.100"
|
||||
thiserror = "2.0.17"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry-otlp = { version = "0.30.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.30.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.19", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.30.0"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
opentelemetry = "0.30.0"
|
||||
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.20", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.31.0"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
opentelemetry = "0.31.0"
|
||||
tracing = "0.1.41"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.43", features = ["derive"] }
|
||||
clap = { version = "4.5.48", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO / AUTH
|
||||
uuid = { version = "1.17.0", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "9.3.1", default-features = false }
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "9.3.1", default-features = false } # locked back with octorust
|
||||
rustls = { version = "0.23.32", features = ["aws-lc-rs"] }
|
||||
pem-rfc7468 = { version = "0.7.0", features = ["alloc"] }
|
||||
openidconnect = "4.0.1"
|
||||
urlencoding = "2.1.3"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.17.0"
|
||||
bcrypt = "0.17.1"
|
||||
base64 = "0.22.1"
|
||||
rustls = "0.23.31"
|
||||
pkcs8 = "0.10.2"
|
||||
snow = "0.10.0"
|
||||
hmac = "0.12.1"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
rand = "0.9.2"
|
||||
hex = "0.4.3"
|
||||
spki = "0.7.3"
|
||||
der = "0.7.10"
|
||||
|
||||
# SYSTEM
|
||||
portable-pty = "0.9.0"
|
||||
bollard = "0.19.2"
|
||||
sysinfo = "0.37.0"
|
||||
bollard = "0.19.3"
|
||||
sysinfo = "0.37.1"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.8.5"
|
||||
aws-sdk-ec2 = "1.159.0"
|
||||
aws-credential-types = "1.2.5"
|
||||
aws-config = "1.8.6"
|
||||
aws-sdk-ec2 = "1.170.1"
|
||||
aws-credential-types = "1.2.6"
|
||||
|
||||
## CRON
|
||||
english-to-cron = "0.1.6"
|
||||
chrono-tz = "0.10.4"
|
||||
chrono = "0.4.41"
|
||||
chrono = "0.4.42"
|
||||
croner = "3.0.0"
|
||||
|
||||
# MISC
|
||||
async-compression = { version = "0.4.27", features = ["tokio", "gzip"] }
|
||||
async-compression = { version = "0.4.32", features = ["tokio", "gzip"] }
|
||||
derive_builder = "0.20.2"
|
||||
comfy-table = "7.1.4"
|
||||
shell-escape = "0.1.5"
|
||||
comfy-table = "7.2.1"
|
||||
typeshare = "1.0.4"
|
||||
octorust = "0.10.0"
|
||||
dashmap = "6.1.0"
|
||||
wildcard = "0.3.0"
|
||||
colored = "3.0.0"
|
||||
regex = "1.11.1"
|
||||
bytes = "1.10.1"
|
||||
bson = "2.15.0"
|
||||
regex = "1.11.3"
|
||||
bytes = "1.10.1"
|
||||
2
action/build.ts
Normal file
2
action/build.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("build-komodo");
|
||||
5
action/deno.json
Normal file
5
action/deno.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"imports": {
|
||||
"@std/toml": "jsr:@std/toml"
|
||||
}
|
||||
}
|
||||
2
action/deploy.ts
Executable file
2
action/deploy.ts
Executable file
@@ -0,0 +1,2 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("deploy-komodo");
|
||||
52
action/run.ts
Normal file
52
action/run.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import * as TOML from "@std/toml";
|
||||
|
||||
export const run = async (action: string) => {
|
||||
const branch = await new Deno.Command("bash", {
|
||||
args: ["-c", "git rev-parse --abbrev-ref HEAD"],
|
||||
})
|
||||
.output()
|
||||
.then((r) => new TextDecoder("utf-8").decode(r.stdout).trim());
|
||||
|
||||
const cargo_toml_str = await Deno.readTextFile("Cargo.toml");
|
||||
const prev_version = (
|
||||
TOML.parse(cargo_toml_str) as {
|
||||
workspace: { package: { version: string } };
|
||||
}
|
||||
).workspace.package.version;
|
||||
|
||||
const [version, tag, count] = prev_version.split("-");
|
||||
const next_count = Number(count) + 1;
|
||||
|
||||
const next_version = `${version}-${tag}-${next_count}`;
|
||||
|
||||
await Deno.writeTextFile(
|
||||
"Cargo.toml",
|
||||
cargo_toml_str.replace(
|
||||
`version = "${prev_version}"`,
|
||||
`version = "${next_version}"`
|
||||
)
|
||||
);
|
||||
|
||||
// Cargo check first here to make sure lock file is updated before commit.
|
||||
const cmd = `
|
||||
cargo check
|
||||
echo ""
|
||||
|
||||
git add --all
|
||||
git commit --all --message "deploy ${version}-${tag}-${next_count}"
|
||||
|
||||
echo ""
|
||||
git push
|
||||
echo ""
|
||||
|
||||
km run -y action ${action} "KOMODO_BRANCH=${branch}&KOMODO_VERSION=${version}&KOMODO_TAG=${tag}-${next_count}"
|
||||
`
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0 && !line.startsWith("//"))
|
||||
.join(" && ");
|
||||
|
||||
new Deno.Command("bash", {
|
||||
args: ["-c", cmd],
|
||||
}).spawn();
|
||||
};
|
||||
@@ -1,7 +1,8 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -16,7 +17,8 @@ COPY ./bin/cli ./bin/cli
|
||||
RUN \
|
||||
cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_periphery --release && \
|
||||
cargo build -p komodo_cli --release
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
@@ -25,6 +27,6 @@ COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
## Uses chef for dependency caching to help speed up back-to-back builds.
|
||||
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.89.0-bullseye AS chef
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
|
||||
WORKDIR /builder
|
||||
|
||||
# Plan just the RECIPE to see if things have changed
|
||||
@@ -12,6 +12,7 @@ COPY . .
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM chef AS builder
|
||||
RUN cargo install cargo-strip
|
||||
COPY --from=planner /builder/recipe.json recipe.json
|
||||
# Build JUST dependencies - cached layer
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
@@ -20,7 +21,8 @@ COPY . .
|
||||
RUN \
|
||||
cargo build --release --bin core && \
|
||||
cargo build --release --bin periphery && \
|
||||
cargo build --release --bin km
|
||||
cargo build --release --bin km && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
@@ -29,6 +31,6 @@ COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -19,6 +19,7 @@ komodo_client.workspace = true
|
||||
database.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
noise.workspace = true
|
||||
# external
|
||||
futures-util.workspace = true
|
||||
comfy-table.workspace = true
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -8,7 +9,7 @@ COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN cargo build -p komodo_cli --release
|
||||
RUN cargo build -p komodo_cli --release && cargo strip
|
||||
|
||||
# Copy binaries to distroless base
|
||||
FROM gcr.io/distroless/cc
|
||||
@@ -19,6 +20,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -24,6 +24,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -13,6 +13,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -292,7 +292,7 @@ impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
Cell::new(self.0.unwrap_or("Unknown")),
|
||||
ports,
|
||||
Cell::new(networks.join(", ")),
|
||||
Cell::new(clamp_sha(&image)),
|
||||
Cell::new(clamp_sha(image)),
|
||||
];
|
||||
if !links {
|
||||
return res;
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::entities::{
|
||||
config::cli::args::database::DatabaseCommand, optional_string,
|
||||
};
|
||||
@@ -21,6 +22,7 @@ pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
|
||||
DatabaseCommand::Copy { yes, index, .. } => {
|
||||
copy(*index, *yes).await
|
||||
}
|
||||
DatabaseCommand::V1Downgrade { yes } => v1_downgrade(*yes).await,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -318,3 +320,45 @@ async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
|
||||
|
||||
database::utils::copy(&source_db, &target_db).await
|
||||
}
|
||||
|
||||
async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} 🦎",
|
||||
"Komodo".bold(),
|
||||
"V1 Downgrade".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Downgrade the database to V1 compatible data structures."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
crate::command::wait_for_enter("run downgrade", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
db.collection::<Document>("Server")
|
||||
.update_many(doc! {}, doc! { "$set": { "info": null } })
|
||||
.await
|
||||
.context("Failed to downgrade Server schema")?;
|
||||
|
||||
info!("V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -212,9 +212,15 @@ pub async fn handle(
|
||||
Execution::BatchDestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunStackService(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::SendAlert(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::ClearRepoCache(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
@@ -224,6 +230,9 @@ pub async fn handle(
|
||||
Execution::GlobalAutoUpdate(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateAllServerKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
@@ -464,10 +473,18 @@ pub async fn handle(
|
||||
Execution::BatchDestroyStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunStackService(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::TestAlerter(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::SendAlert(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::ClearRepoCache(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
@@ -480,6 +497,10 @@ pub async fn handle(
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateAllServerKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
@@ -535,20 +556,20 @@ async fn poll_update_until_complete(
|
||||
} else {
|
||||
format!("{}/updates/{}", cli_config().host, update.id)
|
||||
};
|
||||
info!("Link: '{}'", link.bold());
|
||||
println!("Link: '{}'", link.bold());
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let timer = tokio::time::Instant::now();
|
||||
let update = client.poll_update_until_complete(&update.id).await?;
|
||||
if update.success {
|
||||
info!(
|
||||
println!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION SUCCESSFUL".green(),
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
eprintln!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION FAILED".red(),
|
||||
|
||||
@@ -233,7 +233,7 @@ async fn list_schedules(
|
||||
}
|
||||
|
||||
fn fix_tags<T>(
|
||||
resources: &mut Vec<ResourceListItem<T>>,
|
||||
resources: &mut [ResourceListItem<T>],
|
||||
tags: &HashMap<String, String>,
|
||||
) {
|
||||
resources.iter_mut().for_each(|resource| {
|
||||
@@ -794,7 +794,7 @@ impl PrintTable for ResourceListItem<ServerListItemInfo> {
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.address),
|
||||
Cell::new(self.info.address.as_deref().unwrap_or("inbound")),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
];
|
||||
if links {
|
||||
|
||||
@@ -111,7 +111,7 @@ fn print_items<T: PrintTable + Serialize>(
|
||||
};
|
||||
table.load_preset(preset).set_header(
|
||||
T::header(links)
|
||||
.into_iter()
|
||||
.iter()
|
||||
.map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
|
||||
);
|
||||
for item in items {
|
||||
|
||||
@@ -41,6 +41,12 @@ async fn app() -> anyhow::Result<()> {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
args::Command::Key { command } => {
|
||||
noise::key::command::handle(command).await
|
||||
}
|
||||
args::Command::Database { command } => {
|
||||
command::database::handle(command).await
|
||||
}
|
||||
args::Command::Container(container) => {
|
||||
command::container::handle(container).await
|
||||
}
|
||||
@@ -54,9 +60,6 @@ async fn app() -> anyhow::Result<()> {
|
||||
args::Command::Update { command } => {
|
||||
command::update::handle(command).await
|
||||
}
|
||||
args::Command::Database { command } => {
|
||||
command::database::handle(command).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -20,12 +20,14 @@ periphery_client.workspace = true
|
||||
environment_file.workspace = true
|
||||
interpolate.workspace = true
|
||||
formatting.workspace = true
|
||||
transport.workspace = true
|
||||
database.workspace = true
|
||||
response.workspace = true
|
||||
command.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
cache.workspace = true
|
||||
noise.workspace = true
|
||||
git.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
@@ -38,7 +40,6 @@ slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
aws-credential-types.workspace = true
|
||||
tokio-tungstenite.workspace = true
|
||||
english-to-cron.workspace = true
|
||||
openidconnect.workspace = true
|
||||
jsonwebtoken.workspace = true
|
||||
@@ -70,6 +71,7 @@ chrono.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
rustls.workspace = true
|
||||
bytes.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
regex.workspace = true
|
||||
@@ -81,3 +83,4 @@ rand.workspace = true
|
||||
hmac.workspace = true
|
||||
sha2.workspace = true
|
||||
hex.workspace = true
|
||||
url.workspace = true
|
||||
@@ -1,7 +1,8 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.89.0-bullseye AS core-builder
|
||||
FROM rust:1.90.0-bullseye AS core-builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -13,7 +14,8 @@ COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile app
|
||||
RUN cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_cli --release
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
@@ -56,6 +58,6 @@ ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -54,6 +54,6 @@ ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -43,6 +43,6 @@ ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -17,6 +17,28 @@ pub async fn send_alert(
|
||||
"{level} | If you see this message, then Alerter **{name}** is **working**\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Version mismatch detected ⚠️\nPeriphery: **{server_version}** | Core: **{core_version}**\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -207,6 +229,16 @@ pub async fn send_alert(
|
||||
"{level} | **{name}** ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !content.is_empty() {
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use ::slack::types::Block;
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use derive_variants::ExtractVariant;
|
||||
@@ -48,8 +47,9 @@ pub async fn send_alerts(alerts: &[Alert]) {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
let handles = alerts
|
||||
.iter()
|
||||
.map(|alert| send_alert_to_alerters(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
@@ -58,7 +58,7 @@ pub async fn send_alerts(alerts: &[Alert]) {
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
|
||||
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -250,3 +250,238 @@ fn resource_link(
|
||||
id,
|
||||
)
|
||||
}
|
||||
|
||||
/// Standard message content format
|
||||
/// used by Ntfy, Pushover.
|
||||
fn standard_alert_content(alert: &Alert) -> String {
|
||||
let level = fmt_level(alert.level);
|
||||
match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,205 +8,7 @@ pub async fn send_alert(
|
||||
email: Option<&str>,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
let content = standard_alert_content(alert);
|
||||
if !content.is_empty() {
|
||||
send_message(url, email, content).await?;
|
||||
}
|
||||
|
||||
@@ -7,205 +7,7 @@ pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
let content = standard_alert_content(alert);
|
||||
if !content.is_empty() {
|
||||
send_message(url, content).await?;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use ::slack::types::OwnedBlock as Block;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
@@ -23,6 +25,35 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let text = match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Periphery version now matches Core version ✅"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}"
|
||||
)
|
||||
}
|
||||
};
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Server,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -429,6 +460,12 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
let text = format!("{level} | {message}");
|
||||
let blocks =
|
||||
vec![Block::header(text.clone()), Block::section(details)];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !text.is_empty() {
|
||||
@@ -442,17 +479,20 @@ pub async fn send_alert(
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
let slack = ::slack::Client::new(url_interpolated);
|
||||
slack.send_message(text, blocks).await.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
slack
|
||||
.send_owned_message_single(&text, blocks.as_deref())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -3,11 +3,12 @@ use std::{sync::OnceLock, time::Instant};
|
||||
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::auth::*, entities::user::User};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use serror::{AddStatusCode, Json};
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -152,7 +153,11 @@ impl Resolve<AuthArgs> for GetUser {
|
||||
self,
|
||||
AuthArgs { headers }: &AuthArgs,
|
||||
) -> serror::Result<User> {
|
||||
let user_id = get_user_id_from_headers(headers).await?;
|
||||
Ok(get_user(&user_id).await?)
|
||||
let user_id = get_user_id_from_headers(headers)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
get_user(&user_id)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,8 +92,11 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure action not already busy before updating.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.running = true)?;
|
||||
let _action_guard = action_state.update_custom(
|
||||
|state| state.running += 1,
|
||||
|state| state.running -= 1,
|
||||
false,
|
||||
)?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
|
||||
@@ -1,18 +1,22 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use futures::{TryStreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::TestAlerter,
|
||||
api::execute::{SendAlert, TestAlerter},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
|
||||
alerter::Alerter,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
alert::send_alert_to_alerter, helpers::update::update_update,
|
||||
permission::get_check_permissions,
|
||||
permission::get_check_permissions, resource::list_full_for_user,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
@@ -71,3 +75,75 @@ impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ExecuteArgs> for SendAlert {
|
||||
#[instrument(name = "SendAlert", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerters = list_full_for_user::<Alerter>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
&[],
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|a| {
|
||||
a.config.enabled
|
||||
&& (self.alerters.is_empty()
|
||||
|| self.alerters.contains(&a.name)
|
||||
|| self.alerters.contains(&a.id))
|
||||
&& (a.config.alert_types.is_empty()
|
||||
|| a.config.alert_types.contains(&AlertDataVariant::Custom))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if alerters.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"
|
||||
).status_code(StatusCode::BAD_REQUEST));
|
||||
}
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let ts = komodo_timestamp();
|
||||
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts,
|
||||
resolved: true,
|
||||
level: self.level,
|
||||
target: update.target.clone(),
|
||||
data: AlertData::Custom {
|
||||
message: self.message,
|
||||
details: self.details,
|
||||
},
|
||||
resolved_ts: Some(ts),
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"Send alert",
|
||||
serde_json::to_string_pretty(&alert)
|
||||
.context("Failed to serialize alert to JSON")?,
|
||||
);
|
||||
|
||||
if let Err(e) = alerters
|
||||
.iter()
|
||||
.map(|alerter| send_alert_to_alerter(alerter, &alert))
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
{
|
||||
update.push_error_log("Send Error", format_serror(&e.into()));
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use std::{future::IntoFuture, time::Duration};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::IntoFuture,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
@@ -20,10 +24,10 @@ use komodo_client::{
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
all_logs_success,
|
||||
build::{Build, BuildConfig, ImageRegistryConfig},
|
||||
build::{Build, BuildConfig},
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
update::{Log, Update},
|
||||
@@ -133,8 +137,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
let git_token =
|
||||
build_git_token(&mut build, repo.as_mut()).await?;
|
||||
|
||||
let registry_token =
|
||||
validate_account_extract_registry_token(&build).await?;
|
||||
let registry_tokens =
|
||||
validate_account_extract_registry_tokens(&build).await?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
@@ -245,7 +249,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
@@ -284,19 +288,17 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
repo,
|
||||
registry_token,
|
||||
registry_tokens,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
// Push a commit hash tagged image
|
||||
additional_tags: if update.commit_hash.is_empty() {
|
||||
Default::default()
|
||||
} else {
|
||||
vec![update.commit_hash.clone()]
|
||||
},
|
||||
// To push a commit hash tagged image
|
||||
commit_hash: optional_string(&update.commit_hash),
|
||||
// Unused for now
|
||||
additional_tags: Default::default(),
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
@@ -342,7 +344,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(cleanup_data, &mut update).await;
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
@@ -608,34 +611,48 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token matching requirements.
|
||||
/// Otherwise it is left to periphery.
|
||||
async fn validate_account_extract_registry_token(
|
||||
async fn validate_account_extract_registry_tokens(
|
||||
Build {
|
||||
config:
|
||||
BuildConfig {
|
||||
image_registry:
|
||||
ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
},
|
||||
..
|
||||
},
|
||||
config: BuildConfig { image_registry, .. },
|
||||
..
|
||||
}: &Build,
|
||||
) -> serror::Result<Option<String>> {
|
||||
if domain.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
// Maps (domain, account) -> token
|
||||
) -> serror::Result<Vec<(String, String, String)>> {
|
||||
let mut res = HashMap::with_capacity(image_registry.capacity());
|
||||
|
||||
for (domain, account) in image_registry
|
||||
.iter()
|
||||
.map(|r| (r.domain.as_str(), r.account.as_str()))
|
||||
// This ensures uniqueness / prevents redundant logins
|
||||
.collect::<HashSet<_>>()
|
||||
{
|
||||
if domain.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let Some(registry_token) = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)? else {
|
||||
continue;
|
||||
};
|
||||
|
||||
res.insert(
|
||||
(domain.to_string(), account.to_string()),
|
||||
registry_token,
|
||||
);
|
||||
}
|
||||
|
||||
let registry_token = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?;
|
||||
|
||||
Ok(registry_token)
|
||||
Ok(
|
||||
res
|
||||
.into_iter()
|
||||
.map(|((domain, account), token)| (domain, account, token))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ use komodo_client::{
|
||||
deployment::{
|
||||
Deployment, DeploymentImage, extract_registry_domain,
|
||||
},
|
||||
get_image_name, komodo_timestamp, optional_string,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, Update},
|
||||
@@ -115,8 +115,11 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
let (version, registry_token) = match &deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version
|
||||
} else {
|
||||
@@ -133,21 +136,27 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version_str}"),
|
||||
};
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
(version, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
} = first_registry;
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
deployment.config.image_registry_account = account
|
||||
deployment.config.image_registry_account =
|
||||
account.to_string();
|
||||
}
|
||||
let token = if !deployment
|
||||
.config
|
||||
.image_registry_account
|
||||
.is_empty()
|
||||
{
|
||||
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|
||||
registry_token(domain, &deployment.config.image_registry_account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
|
||||
)?
|
||||
} else {
|
||||
@@ -194,7 +203,8 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
update.version = version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal: self.stop_signal,
|
||||
@@ -213,7 +223,7 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
}
|
||||
};
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -240,8 +250,11 @@ pub async fn pull_deployment_inner(
|
||||
let (image, account, token) = match deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version.to_string()
|
||||
} else {
|
||||
@@ -255,26 +268,31 @@ pub async fn pull_deployment_inner(
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
let image = format!("{image_name}:{version}");
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
(image, None, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
} = first_registry;
|
||||
let account =
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
account
|
||||
} else {
|
||||
deployment.config.image_registry_account
|
||||
&deployment.config.image_registry_account
|
||||
};
|
||||
let token = if !account.is_empty() {
|
||||
registry_token(&domain, &account).await.with_context(
|
||||
registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(image, optional_string(&account), token)
|
||||
(image, optional_string(account), token)
|
||||
}
|
||||
}
|
||||
DeploymentImage::Image { image } => {
|
||||
@@ -314,8 +332,9 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
let res = async {
|
||||
let log = match periphery_client(server)?
|
||||
.request(api::image::PullImage {
|
||||
let log = match periphery_client(server)
|
||||
.await?
|
||||
.request(api::docker::PullImage {
|
||||
name: image,
|
||||
account,
|
||||
token,
|
||||
@@ -326,7 +345,7 @@ pub async fn pull_deployment_inner(
|
||||
Err(e) => Log::error("Pull image", format_serror(&e.into())),
|
||||
};
|
||||
|
||||
update_cache_for_server(server).await;
|
||||
update_cache_for_server(server, true).await;
|
||||
anyhow::Ok(log)
|
||||
}
|
||||
.await;
|
||||
@@ -397,7 +416,8 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -411,7 +431,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -444,7 +464,8 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RestartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -460,7 +481,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -493,7 +514,8 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::PauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -507,7 +529,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -540,7 +562,8 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::UnpauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -556,7 +579,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -589,7 +612,8 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StopContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -611,7 +635,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -671,7 +695,8 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -694,7 +719,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
use std::sync::OnceLock;
|
||||
use std::{fmt::Write as _, sync::OnceLock};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::run_komodo_command;
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use formatting::{bold, format_serror};
|
||||
use futures::StreamExt;
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
|
||||
RotateAllServerKeys,
|
||||
},
|
||||
entities::{
|
||||
deployment::DeploymentState, server::ServerState,
|
||||
@@ -24,6 +26,7 @@ use crate::{
|
||||
},
|
||||
config::core_config,
|
||||
helpers::update::update_update,
|
||||
resource::rotate_server_keys,
|
||||
state::{
|
||||
db_client, deployment_status_cache, server_status_cache,
|
||||
stack_status_cache,
|
||||
@@ -49,7 +52,7 @@ impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -124,7 +127,7 @@ impl Resolve<ExecuteArgs> for BackupCoreDatabase {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -173,7 +176,7 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -317,3 +320,98 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn global_rotate_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Rotate All Server Keys already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let mut servers = db_client()
|
||||
.servers
|
||||
.find(doc! { "config.enabled": true })
|
||||
.await
|
||||
.context("Failed to query servers from database")?;
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
|
||||
let mut log = String::new();
|
||||
|
||||
while let Some(server) = servers.next().await {
|
||||
let server = match server {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
warn!("Failed to parse Server | {e:#}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
let Some(status) = server_status_cache.get(&server.id).await
|
||||
else {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: No Status ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
};
|
||||
if !matches!(status.state, ServerState::Ok) {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: {} ⚠️",
|
||||
bold(&server.name),
|
||||
status.state
|
||||
);
|
||||
continue;
|
||||
}
|
||||
match rotate_server_keys(&server).await {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated keys for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate {} keys",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Server Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -102,6 +102,7 @@ pub enum ExecuteRequest {
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
RunStackService(RunStackService),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
Deploy(Deploy),
|
||||
@@ -139,6 +140,7 @@ pub enum ExecuteRequest {
|
||||
|
||||
// ==== ALERTER ====
|
||||
TestAlerter(TestAlerter),
|
||||
SendAlert(SendAlert),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
@@ -147,6 +149,7 @@ pub enum ExecuteRequest {
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
BackupCoreDatabase(BackupCoreDatabase),
|
||||
GlobalAutoUpdate(GlobalAutoUpdate),
|
||||
RotateAllServerKeys(RotateAllServerKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -219,24 +222,33 @@ pub fn inner_handler(
|
||||
));
|
||||
}
|
||||
|
||||
// Spawn a task for the execution which continues
|
||||
// running after this method returns.
|
||||
let handle =
|
||||
tokio::spawn(task(req_id, request, user, update.clone()));
|
||||
|
||||
// Spawns another task to monitor the first for failures,
|
||||
// and add the log to Update about it (which primary task can't do because it errored out)
|
||||
tokio::spawn({
|
||||
let update_id = update.id.clone();
|
||||
async move {
|
||||
let log = match handle.await {
|
||||
Ok(Err(e)) => {
|
||||
warn!("/execute request {req_id} task error: {e:#}",);
|
||||
Log::error("task error", format_serror(&e.into()))
|
||||
Log::error("Task Error", format_serror(&e.into()))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("/execute request {req_id} spawn error: {e:?}",);
|
||||
Log::error("spawn error", format!("{e:#?}"))
|
||||
Log::error("Spawn Error", format!("{e:#?}"))
|
||||
}
|
||||
_ => return,
|
||||
};
|
||||
let res = async {
|
||||
// Nothing to do if update was never actually created,
|
||||
// which is the case when the id is empty.
|
||||
if update_id.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let mut update =
|
||||
find_one_by_id(&db_client().updates, &update_id)
|
||||
.await
|
||||
|
||||
@@ -105,7 +105,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -220,7 +220,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -463,7 +463,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_builder_early_return(update, repo.id, repo.name, true).await
|
||||
@@ -510,7 +510,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(cleanup_data, &mut update).await;
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
|
||||
@@ -50,7 +50,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
@@ -66,7 +66,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -104,7 +104,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RestartContainer {
|
||||
@@ -122,7 +122,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -160,7 +160,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PauseContainer {
|
||||
@@ -176,7 +176,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -214,7 +214,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::UnpauseContainer {
|
||||
@@ -232,7 +232,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -270,7 +270,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StopContainer {
|
||||
@@ -288,7 +288,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -332,7 +332,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
@@ -350,7 +350,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -387,7 +387,8 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StartAllContainers {})
|
||||
.await
|
||||
.context("failed to start all containers on host")?;
|
||||
@@ -401,7 +402,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -437,7 +438,8 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RestartAllContainers {})
|
||||
.await
|
||||
.context("failed to restart all containers on host")?;
|
||||
@@ -453,7 +455,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -489,7 +491,8 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::PauseAllContainers {})
|
||||
.await
|
||||
.context("failed to pause all containers on host")?;
|
||||
@@ -503,7 +506,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -539,7 +542,8 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::UnpauseAllContainers {})
|
||||
.await
|
||||
.context("failed to unpause all containers on host")?;
|
||||
@@ -555,7 +559,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -591,7 +595,8 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StopAllContainers {})
|
||||
.await
|
||||
.context("failed to stop all containers on host")?;
|
||||
@@ -605,7 +610,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -641,7 +646,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PruneContainers {})
|
||||
@@ -660,7 +665,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -686,10 +691,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::DeleteNetwork {
|
||||
.request(api::docker::DeleteNetwork {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -711,7 +716,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -748,10 +753,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::PruneNetworks {})
|
||||
.request(api::docker::PruneNetworks {})
|
||||
.await
|
||||
.context(format!(
|
||||
"failed to prune networks on server {}",
|
||||
@@ -765,7 +770,7 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -791,10 +796,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::image::DeleteImage {
|
||||
.request(api::docker::DeleteImage {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -813,7 +818,7 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -850,10 +855,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::image::PruneImages {}).await {
|
||||
match periphery.request(api::docker::PruneImages {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune images",
|
||||
@@ -865,7 +870,7 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -891,10 +896,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::volume::DeleteVolume {
|
||||
.request(api::docker::DeleteVolume {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -916,7 +921,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -953,10 +958,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::volume::PruneVolumes {}).await {
|
||||
match periphery.request(api::docker::PruneVolumes {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune volumes",
|
||||
@@ -968,7 +973,7 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1005,7 +1010,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuilders {}).await {
|
||||
@@ -1020,7 +1025,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1057,7 +1062,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuildx {}).await {
|
||||
@@ -1072,7 +1077,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1109,7 +1114,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery.request(api::PruneSystem {}).await {
|
||||
Ok(log) => log,
|
||||
@@ -1123,7 +1128,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -1,15 +1,23 @@
|
||||
use std::{collections::HashSet, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mungos::mongodb::bson::{doc, to_document};
|
||||
use database::mungos::mongodb::bson::{
|
||||
doc, oid::ObjectId, to_bson, to_document,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{execute::*, write::RefreshStackCache},
|
||||
entities::{
|
||||
FileContents,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
stack::{Stack, StackInfo},
|
||||
stack::{
|
||||
Stack, StackFileRequires, StackInfo, StackRemoteFileContents,
|
||||
},
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::compose::*;
|
||||
@@ -21,7 +29,9 @@ use crate::{
|
||||
periphery_client,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
stack_git_token,
|
||||
update::{add_update_without_send, update_update},
|
||||
update::{
|
||||
add_update_without_send, init_execution_update, update_update,
|
||||
},
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
permission::get_check_permissions,
|
||||
@@ -145,7 +155,8 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
compose_config,
|
||||
commit_hash,
|
||||
commit_message,
|
||||
} = periphery_client(&server)?
|
||||
} = periphery_client(&server)
|
||||
.await?
|
||||
.request(ComposeUp {
|
||||
stack: stack.clone(),
|
||||
services: self.services,
|
||||
@@ -179,7 +190,15 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
) = if deployed {
|
||||
(
|
||||
Some(latest_services.clone()),
|
||||
Some(file_contents.clone()),
|
||||
Some(
|
||||
file_contents
|
||||
.iter()
|
||||
.map(|f| FileContents {
|
||||
path: f.path.clone(),
|
||||
contents: f.contents.clone(),
|
||||
})
|
||||
.collect(),
|
||||
),
|
||||
compose_config,
|
||||
commit_hash.clone(),
|
||||
commit_message.clone(),
|
||||
@@ -242,7 +261,7 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
}
|
||||
|
||||
// Ensure cached stack state up to date by updating server cache
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -289,62 +308,347 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
RefreshStackCache {
|
||||
stack: stack.id.clone(),
|
||||
}
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await?;
|
||||
|
||||
let stack = resource::get::<Stack>(&stack.id).await?;
|
||||
let changed = match (
|
||||
|
||||
let action = match (
|
||||
&stack.info.deployed_contents,
|
||||
&stack.info.remote_contents,
|
||||
) {
|
||||
(Some(deployed_contents), Some(latest_contents)) => {
|
||||
let changed = || {
|
||||
for latest in latest_contents {
|
||||
let Some(deployed) = deployed_contents
|
||||
.iter()
|
||||
.find(|c| c.path == latest.path)
|
||||
else {
|
||||
return true;
|
||||
};
|
||||
if latest.contents != deployed.contents {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
};
|
||||
changed()
|
||||
let services = stack
|
||||
.info
|
||||
.latest_services
|
||||
.iter()
|
||||
.map(|s| s.service_name.clone())
|
||||
.collect::<Vec<_>>();
|
||||
resolve_deploy_if_changed_action(
|
||||
deployed_contents,
|
||||
latest_contents,
|
||||
&services,
|
||||
)
|
||||
}
|
||||
(None, _) => true,
|
||||
_ => false,
|
||||
(None, _) => DeployIfChangedAction::FullDeploy,
|
||||
_ => DeployIfChangedAction::Services {
|
||||
deploy: Vec::new(),
|
||||
restart: Vec::new(),
|
||||
},
|
||||
};
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
if !changed {
|
||||
update.push_simple_log(
|
||||
"Diff compose files",
|
||||
String::from("Deploy cancelled after no changes detected."),
|
||||
);
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
match action {
|
||||
// Existing path pre 1.19.1
|
||||
DeployIfChangedAction::FullDeploy => {
|
||||
// Don't actually send it here, let the handler send it after it can set action state.
|
||||
// This is usually done in crate::helpers::update::init_execution_update.
|
||||
update.id = add_update_without_send(&update).await?;
|
||||
|
||||
// Don't actually send it here, let the handler send it after it can set action state.
|
||||
// This is usually done in crate::helpers::update::init_execution_update.
|
||||
update.id = add_update_without_send(&update).await?;
|
||||
DeployStack {
|
||||
stack: stack.name,
|
||||
services: Vec::new(),
|
||||
stop_time: self.stop_time,
|
||||
}
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
})
|
||||
.await
|
||||
}
|
||||
DeployIfChangedAction::FullRestart => {
|
||||
// For git repo based stacks, need to do a
|
||||
// PullStack in order to ensure latest repo contents on the
|
||||
// host before restart.
|
||||
maybe_pull_stack(&stack, Some(&mut update)).await?;
|
||||
|
||||
DeployStack {
|
||||
stack: stack.name,
|
||||
services: Vec::new(),
|
||||
stop_time: self.stop_time,
|
||||
let mut update =
|
||||
restart_services(stack.name, Vec::new(), user).await?;
|
||||
|
||||
if update.success {
|
||||
// Need to update 'info.deployed_contents' with the
|
||||
// latest contents so next check doesn't read the same diff.
|
||||
update_deployed_contents_with_latest(
|
||||
&stack.id,
|
||||
stack.info.remote_contents,
|
||||
&mut update,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
DeployIfChangedAction::Services { deploy, restart } => {
|
||||
match (deploy.is_empty(), restart.is_empty()) {
|
||||
// Both empty, nothing to do
|
||||
(true, true) => {
|
||||
update.push_simple_log(
|
||||
"Diff compose files",
|
||||
String::from(
|
||||
"Deploy cancelled after no changes detected.",
|
||||
),
|
||||
);
|
||||
update.finalize();
|
||||
Ok(update)
|
||||
}
|
||||
// Only restart
|
||||
(true, false) => {
|
||||
// For git repo based stacks, need to do a
|
||||
// PullStack in order to ensure latest repo contents on the
|
||||
// host before restart. Only necessary if no "deploys" (deploy already pulls stack).
|
||||
maybe_pull_stack(&stack, Some(&mut update)).await?;
|
||||
|
||||
let mut update =
|
||||
restart_services(stack.name, restart, user).await?;
|
||||
|
||||
if update.success {
|
||||
// Need to update 'info.deployed_contents' with the
|
||||
// latest contents so next check doesn't read the same diff.
|
||||
update_deployed_contents_with_latest(
|
||||
&stack.id,
|
||||
stack.info.remote_contents,
|
||||
&mut update,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
// Only deploy
|
||||
(false, true) => {
|
||||
deploy_services(stack.name, deploy, user).await
|
||||
}
|
||||
// Deploy then restart, returning non-db update with executed services.
|
||||
(false, false) => {
|
||||
update.push_simple_log(
|
||||
"Execute Deploys",
|
||||
format!("Deploying: {}", deploy.join(", "),),
|
||||
);
|
||||
// This already updates 'stack.info.deployed_services',
|
||||
// restart doesn't require this again.
|
||||
let deploy_update =
|
||||
deploy_services(stack.name.clone(), deploy, user)
|
||||
.await?;
|
||||
if !deploy_update.success {
|
||||
update.push_error_log(
|
||||
"Execute Deploys",
|
||||
String::from("There was a failure in service deploy"),
|
||||
);
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
update.push_simple_log(
|
||||
"Execute Restarts",
|
||||
format!("Restarting: {}", restart.join(", "),),
|
||||
);
|
||||
let restart_update =
|
||||
restart_services(stack.name, restart, user).await?;
|
||||
if !restart_update.success {
|
||||
update.push_error_log(
|
||||
"Execute Restarts",
|
||||
String::from(
|
||||
"There was a failure in a service restart",
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn deploy_services(
|
||||
stack: String,
|
||||
services: Vec<String>,
|
||||
user: &User,
|
||||
) -> serror::Result<Update> {
|
||||
// The existing update is initialized to DeployStack,
|
||||
// but also has not been created on database.
|
||||
// Setup a new update here.
|
||||
let req = ExecuteRequest::DeployStack(DeployStack {
|
||||
stack,
|
||||
services,
|
||||
stop_time: None,
|
||||
});
|
||||
let update = init_execution_update(&req, user).await?;
|
||||
let ExecuteRequest::DeployStack(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
req
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn restart_services(
|
||||
stack: String,
|
||||
services: Vec<String>,
|
||||
user: &User,
|
||||
) -> serror::Result<Update> {
|
||||
// The existing update is initialized to DeployStack,
|
||||
// but also has not been created on database.
|
||||
// Setup a new update here.
|
||||
let req =
|
||||
ExecuteRequest::RestartStack(RestartStack { stack, services });
|
||||
let update = init_execution_update(&req, user).await?;
|
||||
let ExecuteRequest::RestartStack(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
req
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// This can safely be called in [DeployStackIfChanged]
|
||||
/// when there are ONLY changes to config files requiring restart,
|
||||
/// AFTER the restart has been successfully completed.
|
||||
///
|
||||
/// In the case the if changed action is not FullDeploy,
|
||||
/// the only file diff possible is to config files.
|
||||
/// Also note either full or service deploy will already update 'deployed_contents'
|
||||
/// making this method unnecessary in those cases.
|
||||
///
|
||||
/// Changes to config files after restart is applied should
|
||||
/// be taken as the deployed contents, otherwise next changed check
|
||||
/// will restart service again for no reason.
|
||||
async fn update_deployed_contents_with_latest(
|
||||
id: &str,
|
||||
contents: Option<Vec<StackRemoteFileContents>>,
|
||||
update: &mut Update,
|
||||
) {
|
||||
let Some(contents) = contents else {
|
||||
return;
|
||||
};
|
||||
let contents = contents
|
||||
.into_iter()
|
||||
.map(|f| FileContents {
|
||||
path: f.path,
|
||||
contents: f.contents,
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if let Err(e) = (async {
|
||||
let contents = to_bson(&contents)
|
||||
.context("Failed to serialize contents to bson")?;
|
||||
let id =
|
||||
ObjectId::from_str(id).context("Id is not valid ObjectId")?;
|
||||
db_client()
|
||||
.stacks
|
||||
.update_one(
|
||||
doc! { "_id": id },
|
||||
doc! { "$set": { "info.deployed_contents": contents } },
|
||||
)
|
||||
.await
|
||||
.context("Failed to update stack 'deployed_contents'")?;
|
||||
anyhow::Ok(())
|
||||
})
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
"Update content cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
update.finalize();
|
||||
let _ = update_update(update.clone()).await;
|
||||
}
|
||||
}
|
||||
|
||||
enum DeployIfChangedAction {
|
||||
/// Changes to any compose or env files
|
||||
/// always lead to this.
|
||||
FullDeploy,
|
||||
/// If the above is not met, then changes to
|
||||
/// any changed additional file with `requires = "Restart"`
|
||||
/// and empty services array will lead to this.
|
||||
FullRestart,
|
||||
/// If all changed additional files have specific services
|
||||
/// they depend on, collect the final necessary
|
||||
/// services to deploy / restart.
|
||||
/// If eg `deploy` is empty, no services will be redeployed, same for `restart`.
|
||||
/// If both are empty, nothing is to be done.
|
||||
Services {
|
||||
deploy: Vec<String>,
|
||||
restart: Vec<String>,
|
||||
},
|
||||
}
|
||||
|
||||
fn resolve_deploy_if_changed_action(
|
||||
deployed_contents: &[FileContents],
|
||||
latest_contents: &[StackRemoteFileContents],
|
||||
all_services: &[String],
|
||||
) -> DeployIfChangedAction {
|
||||
let mut full_restart = false;
|
||||
let mut deploy = HashSet::<String>::new();
|
||||
let mut restart = HashSet::<String>::new();
|
||||
|
||||
for latest in latest_contents {
|
||||
let Some(deployed) =
|
||||
deployed_contents.iter().find(|c| c.path == latest.path)
|
||||
else {
|
||||
// If file doesn't exist in deployed contents, do full
|
||||
// deploy to align this.
|
||||
return DeployIfChangedAction::FullDeploy;
|
||||
};
|
||||
// Ignore unchanged files
|
||||
if latest.contents == deployed.contents {
|
||||
continue;
|
||||
}
|
||||
match (latest.requires, latest.services.is_empty()) {
|
||||
(StackFileRequires::Redeploy, true) => {
|
||||
// File has requires = "Redeploy" at global level.
|
||||
// Can do early return here.
|
||||
return DeployIfChangedAction::FullDeploy;
|
||||
}
|
||||
(StackFileRequires::Redeploy, false) => {
|
||||
// Requires redeploy on specific services
|
||||
deploy.extend(latest.services.clone());
|
||||
}
|
||||
(StackFileRequires::Restart, true) => {
|
||||
// Services empty -> Full restart
|
||||
full_restart = true;
|
||||
}
|
||||
(StackFileRequires::Restart, false) => {
|
||||
restart.extend(latest.services.clone());
|
||||
}
|
||||
(StackFileRequires::None, _) => {
|
||||
// File can be ignored even with changes.
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
match (full_restart, deploy.is_empty()) {
|
||||
// Full restart required with NO deploys needed -> Full Restart
|
||||
(true, true) => DeployIfChangedAction::FullRestart,
|
||||
// Full restart required WITH deploys needed -> Deploy those, restart all others
|
||||
(true, false) => DeployIfChangedAction::Services {
|
||||
restart: all_services
|
||||
.iter()
|
||||
// Only keep ones that don't need deploy
|
||||
.filter(|&s| !deploy.contains(s))
|
||||
.cloned()
|
||||
.collect(),
|
||||
deploy: deploy.into_iter().collect(),
|
||||
},
|
||||
// No full restart needed -> Deploy / restart as. pickedup.
|
||||
(false, _) => DeployIfChangedAction::Services {
|
||||
deploy: deploy.into_iter().collect(),
|
||||
restart: restart.into_iter().collect(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -371,6 +675,31 @@ impl Resolve<ExecuteArgs> for BatchPullStack {
|
||||
}
|
||||
}
|
||||
|
||||
async fn maybe_pull_stack(
|
||||
stack: &Stack,
|
||||
update: Option<&mut Update>,
|
||||
) -> anyhow::Result<()> {
|
||||
if stack.config.files_on_host
|
||||
|| (stack.config.repo.is_empty()
|
||||
&& stack.config.linked_repo.is_empty())
|
||||
{
|
||||
// Not repo based, no pull necessary
|
||||
return Ok(());
|
||||
}
|
||||
let server =
|
||||
resource::get::<Server>(&stack.config.server_id).await?;
|
||||
let repo = if stack.config.repo.is_empty()
|
||||
&& !stack.config.linked_repo.is_empty()
|
||||
{
|
||||
Some(resource::get::<Repo>(&stack.config.linked_repo).await?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
pull_stack_inner(stack.clone(), Vec::new(), &server, repo, update)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn pull_stack_inner(
|
||||
mut stack: Stack,
|
||||
services: Vec<String>,
|
||||
@@ -421,7 +750,8 @@ pub async fn pull_stack_inner(
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let res = periphery_client(server)?
|
||||
let res = periphery_client(server)
|
||||
.await?
|
||||
.request(ComposePull {
|
||||
stack,
|
||||
services,
|
||||
@@ -433,7 +763,7 @@ pub async fn pull_stack_inner(
|
||||
.await?;
|
||||
|
||||
// Ensure cached stack state up to date by updating server cache
|
||||
update_cache_for_server(server).await;
|
||||
update_cache_for_server(server, true).await;
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
@@ -630,3 +960,96 @@ impl Resolve<ExecuteArgs> for DestroyStack {
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunStackService {
|
||||
#[instrument(name = "RunStackService", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut repo = if !stack.config.files_on_host
|
||||
&& !stack.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&stack.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let action_state =
|
||||
action_states().stack.get_or_insert_default(&stack.id).await;
|
||||
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.deploying = true)?;
|
||||
|
||||
let mut update = update.clone();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token =
|
||||
stack_git_token(&mut stack, repo.as_mut()).await?;
|
||||
|
||||
let registry_token = crate::helpers::registry_token(
|
||||
&stack.config.registry_provider,
|
||||
&stack.config.registry_account,
|
||||
).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {} | {}", stack.config.registry_provider, stack.config.registry_account),
|
||||
)?;
|
||||
|
||||
let secret_replacers = if !stack.config.skip_secret_interp {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_stack(&mut stack)?;
|
||||
if let Some(repo) = repo.as_mut()
|
||||
&& !repo.config.skip_secret_interp
|
||||
{
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
interpolator.push_logs(&mut update.logs);
|
||||
|
||||
interpolator.secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let log = periphery_client(&server)
|
||||
.await?
|
||||
.request(ComposeRun {
|
||||
stack,
|
||||
repo,
|
||||
git_token,
|
||||
registry_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
service: self.service,
|
||||
command: self.command,
|
||||
no_tty: self.no_tty,
|
||||
no_deps: self.no_deps,
|
||||
detach: self.detach,
|
||||
service_ports: self.service_ports,
|
||||
env: self.env,
|
||||
workdir: self.workdir,
|
||||
user: self.user,
|
||||
entrypoint: self.entrypoint,
|
||||
pull: self.pull,
|
||||
})
|
||||
.await?;
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -77,10 +77,8 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
};
|
||||
|
||||
// get the action state for the sync (or insert default).
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.get_or_insert_default(&sync.id)
|
||||
.await;
|
||||
let action_state =
|
||||
action_states().sync.get_or_insert_default(&sync.id).await;
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure sync not already busy before updating.
|
||||
|
||||
@@ -131,8 +131,8 @@ impl Resolve<ReadArgs> for GetActionsSummary {
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.running => {
|
||||
res.running += 1;
|
||||
(_, action_states) if action_states.running > 0 => {
|
||||
res.running += action_states.running;
|
||||
}
|
||||
(ActionState::Ok, _) => res.ok += 1,
|
||||
(ActionState::Failed, _) => res.failed += 1,
|
||||
|
||||
@@ -145,7 +145,8 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerLog {
|
||||
name,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -183,7 +184,8 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerLogSearch {
|
||||
name,
|
||||
terms,
|
||||
@@ -234,7 +236,8 @@ impl Resolve<ReadArgs> for InspectDeploymentContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -262,7 +265,8 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
);
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerStats { name })
|
||||
.await
|
||||
.context("failed to get stats from periphery")?;
|
||||
@@ -321,7 +325,9 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
|
||||
res.not_deployed += 1;
|
||||
}
|
||||
DeploymentState::Unknown => {
|
||||
res.unknown += 1;
|
||||
if !deployment.template {
|
||||
res.unknown += 1;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
res.unhealthy += 1;
|
||||
|
||||
@@ -27,7 +27,9 @@ use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, config::core_config, helpers::periphery_client,
|
||||
auth::auth_request,
|
||||
config::{core_config, core_public_key},
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
};
|
||||
|
||||
@@ -39,6 +41,7 @@ mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod permission;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
@@ -106,27 +109,29 @@ enum ReadRequest {
|
||||
GetServersSummary(GetServersSummary),
|
||||
GetServer(GetServer),
|
||||
GetServerState(GetServerState),
|
||||
GetPeripheryVersion(GetPeripheryVersion),
|
||||
GetPeripheryInformation(GetPeripheryInformation),
|
||||
GetServerActionState(GetServerActionState),
|
||||
GetHistoricalServerStats(GetHistoricalServerStats),
|
||||
ListServers(ListServers),
|
||||
ListFullServers(ListFullServers),
|
||||
ListTerminals(ListTerminals),
|
||||
|
||||
// ==== DOCKER ====
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
InspectDockerContainer(InspectDockerContainer),
|
||||
GetResourceMatchingContainer(GetResourceMatchingContainer),
|
||||
GetContainerLog(GetContainerLog),
|
||||
SearchContainerLog(SearchContainerLog),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
InspectDockerNetwork(InspectDockerNetwork),
|
||||
ListDockerImages(ListDockerImages),
|
||||
InspectDockerImage(InspectDockerImage),
|
||||
ListDockerImageHistory(ListDockerImageHistory),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
ListDockerImages(ListDockerImages),
|
||||
ListDockerVolumes(ListDockerVolumes),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
ListTerminals(ListTerminals),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
|
||||
// ==== SERVER STATS ====
|
||||
GetSystemInformation(GetSystemInformation),
|
||||
@@ -224,6 +229,9 @@ enum ReadRequest {
|
||||
ListGitProviderAccounts(ListGitProviderAccounts),
|
||||
GetDockerRegistryAccount(GetDockerRegistryAccount),
|
||||
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
ListOnboardingKeys(ListOnboardingKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -298,6 +306,7 @@ fn core_info() -> &'static GetCoreInfoResponse {
|
||||
.map(|i| i.namespace.to_string())
|
||||
.collect(),
|
||||
timezone: config.timezone.clone(),
|
||||
public_key: core_public_key().to_string(),
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -343,7 +352,8 @@ impl Resolve<ReadArgs> for ListSecrets {
|
||||
};
|
||||
if let Some(id) = server_id {
|
||||
let server = resource::get::<Server>(&id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListSecrets {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -515,7 +525,8 @@ async fn merge_git_providers_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListGitProviders {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -553,7 +564,8 @@ async fn merge_docker_registries_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListDockerRegistries {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
|
||||
30
bin/core/src/api/read/onboarding_key.rs
Normal file
30
bin/core/src/api/read/onboarding_key.rs
Normal file
@@ -0,0 +1,30 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::api::read::{
|
||||
ListOnboardingKeys, ListOnboardingKeysResponse,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::read::ReadArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ReadArgs> for ListOnboardingKeys {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user: admin }: &ReadArgs,
|
||||
) -> serror::Result<ListOnboardingKeysResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
find_collect(&db_client().onboarding_keys, None, None)
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
@@ -142,7 +142,11 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
(RepoState::Unknown, _) => {
|
||||
if !repo.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _)
|
||||
| (RepoState::Pulling, _)
|
||||
|
||||
@@ -39,18 +39,17 @@ use komodo_client::{
|
||||
use periphery_client::api::{
|
||||
self as periphery,
|
||||
container::InspectContainer,
|
||||
image::{ImageHistory, InspectImage},
|
||||
network::InspectNetwork,
|
||||
volume::InspectVolume,
|
||||
docker::{
|
||||
ImageHistory, InspectImage, InspectNetwork, InspectVolume,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::{get_all_tags, get_system_info},
|
||||
},
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::compose_container_match_regex,
|
||||
@@ -71,18 +70,29 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let core_version = env!("CARGO_PKG_VERSION");
|
||||
let mut res = GetServersSummaryResponse::default();
|
||||
|
||||
for server in servers {
|
||||
res.total += 1;
|
||||
match server.info.state {
|
||||
ServerState::Ok => {
|
||||
res.healthy += 1;
|
||||
// Check for version mismatch
|
||||
if matches!(&server.info.version, Some(version) if version != core_version)
|
||||
{
|
||||
res.warning += 1;
|
||||
} else {
|
||||
res.healthy += 1;
|
||||
}
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
ServerState::Disabled => {
|
||||
res.disabled += 1;
|
||||
if !server.template {
|
||||
res.disabled += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -90,26 +100,6 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryVersion {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryVersionResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let version = server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.version.clone())
|
||||
.unwrap_or(String::from("unknown"));
|
||||
Ok(GetPeripheryVersionResponse { version })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServer {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -213,6 +203,29 @@ impl Resolve<ReadArgs> for GetServerActionState {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryInformation {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryInformationResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.periphery_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing Periphery Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -223,8 +236,17 @@ impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
get_system_info(&server).await.map_err(Into::into)
|
||||
.await
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing system Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -239,15 +261,15 @@ impl Resolve<ReadArgs> for GetSystemStats {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let status =
|
||||
server_status_cache().get(&server.id).await.with_context(
|
||||
|| format!("did not find status for server at {}", server.id),
|
||||
)?;
|
||||
let stats = status
|
||||
.stats
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.context("server stats not available")?;
|
||||
Ok(stats.clone())
|
||||
.cloned()
|
||||
.context("Server status missing system stats. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -277,7 +299,8 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
let stats = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::stats::GetSystemProcesses {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
@@ -466,7 +489,8 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer {
|
||||
name: self.container,
|
||||
})
|
||||
@@ -494,7 +518,8 @@ impl Resolve<ReadArgs> for GetContainerLog {
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::container::GetContainerLog {
|
||||
name: container,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -525,7 +550,8 @@ impl Resolve<ReadArgs> for SearchContainerLog {
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::container::GetContainerLogSearch {
|
||||
name: container,
|
||||
terms,
|
||||
@@ -645,7 +671,8 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectNetwork { name: self.network })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -694,7 +721,8 @@ impl Resolve<ReadArgs> for InspectDockerImage {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectImage { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -724,7 +752,8 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(ImageHistory { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -773,7 +802,8 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectVolume { name: self.volume })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -853,7 +883,8 @@ impl Resolve<ReadArgs> for ListTerminals {
|
||||
let cache = terminals_cache().get_or_insert(server.id.clone());
|
||||
let mut cache = cache.lock().await;
|
||||
if self.fresh || komodo_timestamp() > cache.ttl {
|
||||
cache.list = periphery_client(&server)?
|
||||
cache.list = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::terminal::ListTerminals {})
|
||||
.await
|
||||
.context("Failed to get fresh terminal list")?;
|
||||
|
||||
@@ -89,7 +89,8 @@ impl Resolve<ReadArgs> for GetStackLog {
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLog {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
@@ -122,7 +123,8 @@ impl Resolve<ReadArgs> for SearchStackLog {
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLogSearch {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
@@ -184,7 +186,8 @@ impl Resolve<ReadArgs> for InspectStackContainer {
|
||||
"No service found matching '{service}'. Was the stack last deployed manually?"
|
||||
).into());
|
||||
};
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -363,7 +366,11 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
StackState::Running => res.running += 1,
|
||||
StackState::Stopped | StackState::Paused => res.stopped += 1,
|
||||
StackState::Down => res.down += 1,
|
||||
StackState::Unknown => res.unknown += 1,
|
||||
StackState::Unknown => {
|
||||
if !stack.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
_ => res.unhealthy += 1,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,7 +93,7 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(&sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -138,7 +138,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
continue;
|
||||
}
|
||||
if action_states
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(&resource_sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
|
||||
@@ -54,34 +54,20 @@ async fn execute_terminal_inner(
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("/terminal/execute request | user: {}", user.username);
|
||||
|
||||
let res = async {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let stream = periphery_client(&server)
|
||||
.await?
|
||||
.execute_terminal(terminal, command)
|
||||
.await
|
||||
.context("Failed to execute command on periphery")?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_terminal(terminal, command)
|
||||
.await
|
||||
.context("Failed to execute command on periphery")?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!("/terminal/execute request {req_id} error: {e:#}");
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
// ======================
|
||||
@@ -112,43 +98,25 @@ async fn execute_container_exec_inner(
|
||||
}: ExecuteContainerExecBody,
|
||||
user: User,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!(
|
||||
"/terminal/execute/container request | user: {}",
|
||||
user.username
|
||||
);
|
||||
info!("ExecuteContainerExec request | user: {}", user.username);
|
||||
|
||||
let res = async {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"/terminal/execute/container request {req_id} error: {e:#}"
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
// =======================
|
||||
@@ -178,45 +146,27 @@ async fn execute_deployment_exec_inner(
|
||||
}: ExecuteDeploymentExecBody,
|
||||
user: User,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!(
|
||||
"/terminal/execute/deployment request | user: {}",
|
||||
user.username
|
||||
);
|
||||
info!("ExecuteDeploymentExec request | user: {}", user.username);
|
||||
|
||||
let res = async {
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let server = get::<Server>(&deployment.config.server_id).await?;
|
||||
let server = get::<Server>(&deployment.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(deployment.name, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
let stream = periphery
|
||||
.execute_container_exec(deployment.name, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"/terminal/execute/deployment request {req_id} error: {e:#}"
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
// ==================
|
||||
@@ -247,53 +197,40 @@ async fn execute_stack_exec_inner(
|
||||
}: ExecuteStackExecBody,
|
||||
user: User,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("/terminal/execute/stack request | user: {}", user.username);
|
||||
info!("ExecuteStackExec request | user: {}", user.username);
|
||||
|
||||
let res = async {
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let server = get::<Server>(&stack.config.server_id).await?;
|
||||
let server = get::<Server>(&stack.config.server_id).await?;
|
||||
|
||||
let container = stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.context("could not get stack status")?
|
||||
.curr
|
||||
.services
|
||||
.iter()
|
||||
.find(|s| s.service == service)
|
||||
.context("could not find service")?
|
||||
.container
|
||||
.as_ref()
|
||||
.context("could not find service container")?
|
||||
.name
|
||||
.clone();
|
||||
let container = stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.context("could not get stack status")?
|
||||
.curr
|
||||
.services
|
||||
.iter()
|
||||
.find(|s| s.service == service)
|
||||
.context("could not find service")?
|
||||
.container
|
||||
.as_ref()
|
||||
.context("could not find service container")?
|
||||
.name
|
||||
.clone();
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!("/terminal/execute/stack request {req_id} error: {e:#}");
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
@@ -16,10 +16,8 @@ impl Resolve<WriteArgs> for CreateAction {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Action>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +33,8 @@ impl Resolve<WriteArgs> for CopyAction {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Action>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -16,10 +16,8 @@ impl Resolve<WriteArgs> for CreateAlerter {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Alerter>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +33,8 @@ impl Resolve<WriteArgs> for CopyAlerter {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Alerter>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
use std::{path::PathBuf, str::FromStr, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::mongodb::bson::to_document;
|
||||
use database::{
|
||||
mongo_indexed::doc, mungos::mongodb::bson::oid::ObjectId,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
@@ -21,22 +23,21 @@ use komodo_client::{
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::{
|
||||
PeripheryClient,
|
||||
api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
},
|
||||
use periphery_client::api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
connection::PeripheryConnectionArgs,
|
||||
helpers::{
|
||||
git_token, periphery_client,
|
||||
query::get_server_with_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
periphery::PeripheryClient,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{db_client, github_client},
|
||||
@@ -50,10 +51,8 @@ impl Resolve<WriteArgs> for CreateBuild {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Build>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -71,10 +70,8 @@ impl Resolve<WriteArgs> for CopyBuild {
|
||||
.await?;
|
||||
// reset version to 0.0.0
|
||||
config.version = Default::default();
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Build>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,7 +183,7 @@ async fn write_dockerfile_contents_git(
|
||||
) -> serror::Result<Update> {
|
||||
let WriteBuildFileContents { build: _, contents } = req;
|
||||
|
||||
let mut clone_args: RepoExecutionArgs = if !build
|
||||
let mut repo_args: RepoExecutionArgs = if !build
|
||||
.config
|
||||
.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
@@ -196,8 +193,8 @@ async fn write_dockerfile_contents_git(
|
||||
} else {
|
||||
(&build).into()
|
||||
};
|
||||
let root = clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(root.display().to_string());
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let build_path = build
|
||||
.config
|
||||
@@ -220,11 +217,11 @@ async fn write_dockerfile_contents_git(
|
||||
})?;
|
||||
}
|
||||
|
||||
let access_token = if let Some(account) = &clone_args.account {
|
||||
git_token(&clone_args.provider, account, |https| clone_args.https = https)
|
||||
let access_token = if let Some(account) = &repo_args.account {
|
||||
git_token(&repo_args.provider, account, |https| repo_args.https = https)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
@@ -235,7 +232,7 @@ async fn write_dockerfile_contents_git(
|
||||
if !root.join(".git").exists() {
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&clone_args,
|
||||
&repo_args,
|
||||
access_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
@@ -249,9 +246,11 @@ async fn write_dockerfile_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
clone_args,
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
)
|
||||
@@ -298,7 +297,7 @@ async fn write_dockerfile_contents_git(
|
||||
&format!("{}: Commit Dockerfile", args.user.username),
|
||||
&root,
|
||||
&build_path.join(&dockerfile_path),
|
||||
&build.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -432,13 +431,27 @@ async fn get_on_host_periphery(
|
||||
Err(anyhow!("Files on host doesn't work with AWS builder"))
|
||||
}
|
||||
BuilderConfig::Url(config) => {
|
||||
// TODO: Ensure connection is actually established.
|
||||
// Builder id no good because it may be active for multiple connections.
|
||||
let periphery = PeripheryClient::new(
|
||||
config.address,
|
||||
config.passkey,
|
||||
Duration::from_secs(3),
|
||||
);
|
||||
periphery.health_check().await?;
|
||||
Ok(periphery)
|
||||
PeripheryConnectionArgs::from_url_builder(
|
||||
&ObjectId::new().to_hex(),
|
||||
&config,
|
||||
),
|
||||
config.insecure_tls,
|
||||
&config.passkey,
|
||||
)
|
||||
.await?;
|
||||
// Poll for connection to be estalished
|
||||
let mut err = None;
|
||||
for _ in 0..10 {
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
match periphery.health_check().await {
|
||||
Ok(_) => return Ok(periphery),
|
||||
Err(e) => err = Some(e),
|
||||
};
|
||||
}
|
||||
Err(err.context("Missing error")?)
|
||||
}
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
@@ -453,7 +466,7 @@ async fn get_on_host_periphery(
|
||||
"Builder server is disabled or not reachable"
|
||||
));
|
||||
};
|
||||
periphery_client(&server)
|
||||
periphery_client(&server).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,10 +16,8 @@ impl Resolve<WriteArgs> for CreateBuilder {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Builder>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -35,10 +33,8 @@ impl Resolve<WriteArgs> for CopyBuilder {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Builder>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -38,10 +38,13 @@ impl Resolve<WriteArgs> for CreateDeployment {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, self.config, user)
|
||||
.await?,
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
self.config,
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,10 +61,13 @@ impl Resolve<WriteArgs> for CopyDeployment {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,7 +95,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let container = periphery_client(&server)?
|
||||
let container = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
@@ -153,10 +160,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
});
|
||||
}
|
||||
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Deployment>(&self.name, config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -238,7 +243,8 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
if container_state != DeploymentState::NotDeployed {
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
let log = periphery_client(&server)?
|
||||
let log = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RenameContainer {
|
||||
curr_name: deployment.name.clone(),
|
||||
new_name: name.clone(),
|
||||
|
||||
@@ -23,6 +23,7 @@ mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod permissions;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
@@ -91,6 +92,8 @@ pub enum WriteRequest {
|
||||
CreateTerminal(CreateTerminal),
|
||||
DeleteTerminal(DeleteTerminal),
|
||||
DeleteAllTerminals(DeleteAllTerminals),
|
||||
UpdateServerPublicKey(UpdateServerPublicKey),
|
||||
RotateServerKeys(RotateServerKeys),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
@@ -185,13 +188,18 @@ pub enum WriteRequest {
|
||||
UpdateVariableIsSecret(UpdateVariableIsSecret),
|
||||
DeleteVariable(DeleteVariable),
|
||||
|
||||
// ==== PROVIDERS ====
|
||||
// ==== PROVIDER ====
|
||||
CreateGitProviderAccount(CreateGitProviderAccount),
|
||||
UpdateGitProviderAccount(UpdateGitProviderAccount),
|
||||
DeleteGitProviderAccount(DeleteGitProviderAccount),
|
||||
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
|
||||
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
|
||||
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
CreateOnboardingKey(CreateOnboardingKey),
|
||||
UpdateOnboardingKey(UpdateOnboardingKey),
|
||||
DeleteOnboardingKey(DeleteOnboardingKey),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
|
||||
168
bin/core/src/api/write/onboarding_key.rs
Normal file
168
bin/core/src/api/write/onboarding_key.rs
Normal file
@@ -0,0 +1,168 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CreateOnboardingKey, CreateOnboardingKeyResponse,
|
||||
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
|
||||
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
|
||||
},
|
||||
entities::{komodo_timestamp, onboarding_key::OnboardingKey},
|
||||
};
|
||||
use noise::key::EncodedKeyPair;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCode, AddStatusCodeError};
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateOnboardingKey {
|
||||
#[instrument(name = "CreateServerOnboardingKey", skip(self, admin))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<CreateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let keys = if let Some(private_key) = self.private_key {
|
||||
EncodedKeyPair::from_private_key(&private_key)?
|
||||
} else {
|
||||
EncodedKeyPair::generate()?
|
||||
};
|
||||
let onboarding_key = OnboardingKey {
|
||||
public_key: keys.public.into_inner(),
|
||||
name: self.name,
|
||||
enabled: true,
|
||||
onboarded: Default::default(),
|
||||
created_at: komodo_timestamp(),
|
||||
expires: self.expires,
|
||||
tags: self.tags,
|
||||
copy_server: self.copy_server,
|
||||
create_builder: self.create_builder,
|
||||
};
|
||||
let db = db_client();
|
||||
// Create the key
|
||||
db.onboarding_keys
|
||||
.insert_one(&onboarding_key)
|
||||
.await
|
||||
.context(
|
||||
"Failed to create Server onboarding key on database",
|
||||
)?;
|
||||
let created = db
|
||||
.onboarding_keys
|
||||
.find_one(doc! { "public_key": &onboarding_key.public_key })
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context(
|
||||
"No Server onboarding key found on database after create",
|
||||
)?;
|
||||
Ok(CreateOnboardingKeyResponse {
|
||||
private_key: keys.private.into_inner(),
|
||||
created,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateOnboardingKey {
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
|
||||
// No changes
|
||||
if self.is_none() {
|
||||
return db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
let mut update = Document::new();
|
||||
|
||||
if let Some(enabled) = self.enabled {
|
||||
update.insert("enabled", enabled);
|
||||
}
|
||||
|
||||
if let Some(name) = self.name {
|
||||
update.insert("name", name);
|
||||
}
|
||||
|
||||
if let Some(expires) = self.expires {
|
||||
update.insert("expires", expires);
|
||||
}
|
||||
|
||||
if let Some(tags) = self.tags {
|
||||
update.insert("tags", tags);
|
||||
}
|
||||
|
||||
if let Some(copy_server) = self.copy_server {
|
||||
update.insert("copy_server", copy_server);
|
||||
}
|
||||
|
||||
if let Some(create_builder) = self.create_builder {
|
||||
update.insert("create_builder", create_builder);
|
||||
}
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.update_one(query.clone(), doc! { "$set": update })
|
||||
.await
|
||||
.context("Failed to update onboarding key on database")?;
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteOnboardingKey {
|
||||
#[instrument(name = "DeleteServerOnboardingKey", skip(admin))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<DeleteOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let db = db_client();
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
let creation_key = db
|
||||
.onboarding_keys
|
||||
.find_one(query.clone())
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context("Server onboarding key matching provided public key not found")
|
||||
.status_code(StatusCode::NOT_FOUND)?;
|
||||
db.onboarding_keys.delete_one(query).await.context(
|
||||
"Failed to delete Server onboarding key from database",
|
||||
)?;
|
||||
Ok(creation_key)
|
||||
}
|
||||
}
|
||||
@@ -16,10 +16,8 @@ impl Resolve<WriteArgs> for CreateProcedure {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateProcedureResponse> {
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Procedure>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -36,10 +34,13 @@ impl Resolve<WriteArgs> for CopyProcedure {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
resource::create::<Procedure>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,8 @@ impl Resolve<WriteArgs> for CreateRepo {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(resource::create::<Repo>(&self.name, self.config, user).await?)
|
||||
resource::create::<Repo>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,10 +59,8 @@ impl Resolve<WriteArgs> for CopyRepo {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Repo>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Repo>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -130,7 +129,8 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::git::RenameRepo {
|
||||
curr_name: to_path_compatible_name(&repo.name),
|
||||
new_name: name.clone(),
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use anyhow::Context;
|
||||
use formatting::format_serror;
|
||||
use formatting::{bold, format_serror};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
NoData, Operation,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
server::{Server, ServerInfo},
|
||||
to_docker_compatible_name,
|
||||
update::{Update, UpdateStatus},
|
||||
},
|
||||
@@ -19,7 +19,7 @@ use crate::{
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
resource::{self, update_server_public_key},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
@@ -30,10 +30,16 @@ impl Resolve<WriteArgs> for CreateServer {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(
|
||||
resource::create::<Server>(&self.name, self.config, user)
|
||||
.await?,
|
||||
resource::create::<Server>(
|
||||
&self.name,
|
||||
self.config,
|
||||
self.public_key.map(|public_key| ServerInfo {
|
||||
public_key,
|
||||
..Default::default()
|
||||
}),
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,10 +55,17 @@ impl Resolve<WriteArgs> for CopyServer {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Server>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
|
||||
resource::create::<Server>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
self.public_key.map(|public_key| ServerInfo {
|
||||
public_key,
|
||||
..Default::default()
|
||||
}),
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -96,7 +109,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::CreateNetwork, user);
|
||||
@@ -104,7 +117,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
match periphery
|
||||
.request(api::network::CreateNetwork {
|
||||
.request(api::docker::CreateNetwork {
|
||||
name: to_docker_compatible_name(&self.name),
|
||||
driver: None,
|
||||
})
|
||||
@@ -113,7 +126,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => update.push_error_log(
|
||||
"create network",
|
||||
format_serror(&e.context("failed to create network").into()),
|
||||
format_serror(&e.context("Failed to create network").into()),
|
||||
),
|
||||
};
|
||||
|
||||
@@ -137,7 +150,7 @@ impl Resolve<WriteArgs> for CreateTerminal {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::CreateTerminal {
|
||||
@@ -146,7 +159,7 @@ impl Resolve<WriteArgs> for CreateTerminal {
|
||||
recreate: self.recreate,
|
||||
})
|
||||
.await
|
||||
.context("Failed to create terminal on periphery")?;
|
||||
.context("Failed to create terminal on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
@@ -165,14 +178,14 @@ impl Resolve<WriteArgs> for DeleteTerminal {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteTerminal {
|
||||
terminal: self.terminal,
|
||||
})
|
||||
.await
|
||||
.context("Failed to delete terminal on periphery")?;
|
||||
.context("Failed to delete terminal on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
@@ -191,13 +204,76 @@ impl Resolve<WriteArgs> for DeleteAllTerminals {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteAllTerminals {})
|
||||
.await
|
||||
.context("Failed to delete all terminals on periphery")?;
|
||||
.context("Failed to delete all terminals on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServerPublicKey {
|
||||
#[instrument(name = "UpdateServerPublicKey", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
update_server_public_key(&server.id, &self.public_key).await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::UpdateServerKey, &args.user);
|
||||
|
||||
update.push_simple_log(
|
||||
"Update Server Public Key",
|
||||
format!("Public key updated to {}", bold(&self.public_key)),
|
||||
);
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for RotateServerKeys {
|
||||
#[instrument(name = "RotateServerPrivateKey", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let public_key = periphery
|
||||
.request(api::keys::RotatePrivateKey {})
|
||||
.await
|
||||
.context("Failed to rotate Periphery private key")?
|
||||
.public_key;
|
||||
|
||||
UpdateServerPublicKey {
|
||||
server: server.id,
|
||||
public_key,
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,10 +51,8 @@ impl Resolve<WriteArgs> for CreateStack {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(
|
||||
resource::create::<Stack>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Stack>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -70,10 +68,9 @@ impl Resolve<WriteArgs> for CopyStack {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Stack>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
|
||||
resource::create::<Stack>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -175,7 +172,8 @@ async fn write_stack_file_contents_on_host(
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
@@ -284,6 +282,8 @@ async fn write_stack_file_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
@@ -334,7 +334,7 @@ async fn write_stack_file_contents_git(
|
||||
&format!("{username}: Write Stack File"),
|
||||
&root,
|
||||
&file_path,
|
||||
&stack.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -427,9 +427,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
(vec![], None, None, None, None)
|
||||
} else if let Some(server) = server {
|
||||
let GetComposeContentsOnHostResponse { contents, errors } =
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeContentsOnHost {
|
||||
file_paths: stack.file_paths().to_vec(),
|
||||
file_paths: stack.all_file_dependencies(),
|
||||
name: stack.name.clone(),
|
||||
run_directory: stack.config.run_directory.clone(),
|
||||
})
|
||||
@@ -451,6 +452,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &contents {
|
||||
// Don't include additional files in service parsing
|
||||
if !stack.is_compose_file(&contents.path) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
@@ -489,6 +494,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &remote_contents {
|
||||
// Don't include additional files in service parsing
|
||||
if !stack.is_compose_file(&contents.path) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
|
||||
@@ -68,10 +68,13 @@ impl Resolve<WriteArgs> for CreateResourceSync {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ResourceSync> {
|
||||
Ok(
|
||||
resource::create::<ResourceSync>(&self.name, self.config, user)
|
||||
.await?,
|
||||
resource::create::<ResourceSync>(
|
||||
&self.name,
|
||||
self.config,
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -88,14 +91,13 @@ impl Resolve<WriteArgs> for CopyResourceSync {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<ResourceSync>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
user,
|
||||
)
|
||||
.await?,
|
||||
resource::create::<ResourceSync>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -323,6 +325,8 @@ async fn write_sync_file_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
@@ -373,7 +377,7 @@ async fn write_sync_file_contents_git(
|
||||
&format!("{}: Commit Resource File", args.user.username),
|
||||
&root,
|
||||
&resource_path.join(&file_path),
|
||||
&sync.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
|
||||
@@ -13,9 +13,12 @@ use komodo_client::{
|
||||
server::Server, stack::Stack, sync::ResourceSync, tag::Tag,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::{get_tag, get_tag_check_owner},
|
||||
resource,
|
||||
state::db_client,
|
||||
@@ -29,8 +32,18 @@ impl Resolve<WriteArgs> for CreateTag {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Tag> {
|
||||
if core_config().disable_non_admin_create && !user.admin {
|
||||
return Err(
|
||||
anyhow!("Non admins cannot create tags")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if ObjectId::from_str(&self.name).is_ok() {
|
||||
return Err(anyhow!("tag name cannot be ObjectId").into());
|
||||
return Err(
|
||||
anyhow!("Tag name cannot be ObjectId")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let mut tag = Tag {
|
||||
|
||||
@@ -32,7 +32,7 @@ impl Resolve<WriteArgs> for CreateLocalUser {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin-only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ impl Resolve<WriteArgs> for DeleteUser {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin-only.")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
if admin.username == self.user || admin.id == self.user {
|
||||
@@ -220,6 +220,14 @@ impl Resolve<WriteArgs> for DeleteUser {
|
||||
.delete_one(query)
|
||||
.await
|
||||
.context("Failed to delete user from database")?;
|
||||
// Also remove user id from all user groups
|
||||
if let Err(e) = db
|
||||
.user_groups
|
||||
.update_many(doc! {}, doc! { "$pull": { "users": &user.id } })
|
||||
.await
|
||||
{
|
||||
warn!("Failed to remove deleted user from user groups | {e:?}");
|
||||
};
|
||||
Ok(user)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@ use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{komodo_timestamp, user_group::UserGroup},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -23,7 +25,10 @@ impl Resolve<WriteArgs> for CreateUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let user_group = UserGroup {
|
||||
name: self.name,
|
||||
@@ -58,7 +63,10 @@ impl Resolve<WriteArgs> for RenameUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
@@ -84,7 +92,10 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -117,7 +128,10 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -161,7 +175,10 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -205,7 +222,10 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
@@ -252,7 +272,10 @@ impl Resolve<WriteArgs> for SetEveryoneUserGroup {
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only").into());
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let db = db_client();
|
||||
|
||||
@@ -4,7 +4,9 @@ use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{Operation, ResourceTarget, variable::Variable},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
@@ -22,6 +24,13 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("Only admins can create variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let CreateVariable {
|
||||
name,
|
||||
value,
|
||||
@@ -29,10 +38,6 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
is_secret,
|
||||
} = self;
|
||||
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables").into());
|
||||
}
|
||||
|
||||
let variable = Variable {
|
||||
name,
|
||||
value,
|
||||
@@ -44,7 +49,7 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
.variables
|
||||
.insert_one(&variable)
|
||||
.await
|
||||
.context("failed to create variable on db")?;
|
||||
.context("Failed to create variable on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
@@ -69,7 +74,10 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateVariableValueResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can update variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let UpdateVariableValue { name, value } = self;
|
||||
@@ -87,7 +95,7 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
doc! { "$set": { "value": &value } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable value on db")?;
|
||||
.context("Failed to update variable value on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
@@ -107,7 +115,7 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
)
|
||||
};
|
||||
|
||||
update.push_simple_log("update variable value", log);
|
||||
update.push_simple_log("Update Variable Value", log);
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
@@ -123,7 +131,10 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateVariableDescriptionResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can update variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.variables
|
||||
@@ -132,7 +143,7 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
|
||||
doc! { "$set": { "description": &self.description } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable description on db")?;
|
||||
.context("Failed to update variable description on db")?;
|
||||
Ok(get_variable(&self.name).await?)
|
||||
}
|
||||
}
|
||||
@@ -144,7 +155,10 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateVariableIsSecretResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can update variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can update variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.variables
|
||||
@@ -153,7 +167,7 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
|
||||
doc! { "$set": { "is_secret": self.is_secret } },
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable is secret on db")?;
|
||||
.context("Failed to update variable is secret on db")?;
|
||||
Ok(get_variable(&self.name).await?)
|
||||
}
|
||||
}
|
||||
@@ -164,14 +178,17 @@ impl Resolve<WriteArgs> for DeleteVariable {
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can delete variables").into());
|
||||
return Err(
|
||||
anyhow!("Only admins can delete variables")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let variable = get_variable(&self.name).await?;
|
||||
db_client()
|
||||
.variables
|
||||
.delete_one(doc! { "name": &self.name })
|
||||
.await
|
||||
.context("failed to delete variable on db")?;
|
||||
.context("Failed to delete variable on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
@@ -180,7 +197,7 @@ impl Resolve<WriteArgs> for DeleteVariable {
|
||||
);
|
||||
|
||||
update
|
||||
.push_simple_log("delete variable", format!("{variable:#?}"));
|
||||
.push_simple_log("Delete Variable", format!("{variable:#?}"));
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
|
||||
@@ -187,8 +187,8 @@ impl GoogleOauthClient {
|
||||
Ok(body)
|
||||
} else {
|
||||
let text = res.text().await.context(format!(
|
||||
"method: POST | status: {status} | failed to get response text"
|
||||
))?;
|
||||
"method: POST | status: {status} | failed to get response text"
|
||||
))?;
|
||||
Err(anyhow!("method: POST | status: {status} | text: {text}"))
|
||||
}
|
||||
}
|
||||
@@ -207,5 +207,6 @@ pub struct GoogleUser {
|
||||
#[serde(rename = "sub")]
|
||||
pub id: String,
|
||||
pub email: String,
|
||||
#[serde(default)]
|
||||
pub picture: String,
|
||||
}
|
||||
|
||||
@@ -84,6 +84,8 @@ pub async fn launch_ec2_instance(
|
||||
assign_public_ip,
|
||||
use_public_ip,
|
||||
user_data,
|
||||
periphery_public_key: _,
|
||||
insecure_tls: _,
|
||||
port: _,
|
||||
use_https: _,
|
||||
git_providers: _,
|
||||
|
||||
@@ -4,6 +4,8 @@ pub mod aws;
|
||||
pub enum BuildCleanupData {
|
||||
/// Nothing to clean up
|
||||
Server,
|
||||
/// Cleanup Periphery connection
|
||||
Url,
|
||||
/// Clean up AWS instance
|
||||
Aws { instance_id: String, region: String },
|
||||
}
|
||||
|
||||
@@ -16,6 +16,94 @@ use komodo_client::entities::{
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
use noise::key::{SpkiPublicKey, load_maybe_generate_private_key};
|
||||
|
||||
/// Should call in startup to ensure Core errors without valid private key.
|
||||
pub fn core_private_key() -> &'static String {
|
||||
static CORE_PRIVATE_KEY: OnceLock<String> = OnceLock::new();
|
||||
CORE_PRIVATE_KEY.get_or_init(|| {
|
||||
let config = core_config();
|
||||
if let Some(path) = config.private_key.strip_prefix("file:") {
|
||||
load_maybe_generate_private_key(path).unwrap()
|
||||
} else {
|
||||
config.private_key.clone()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Should call in startup to ensure Core errors without valid private key.
|
||||
pub fn core_public_key() -> &'static String {
|
||||
static CORE_PUBLIC_KEY: OnceLock<String> = OnceLock::new();
|
||||
CORE_PUBLIC_KEY.get_or_init(|| {
|
||||
SpkiPublicKey::from_private_key(core_private_key())
|
||||
.context("Got invalid private key")
|
||||
.unwrap()
|
||||
.into_inner()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn core_connection_query() -> &'static String {
|
||||
static CORE_HOSTNAME: OnceLock<String> = OnceLock::new();
|
||||
CORE_HOSTNAME.get_or_init(|| {
|
||||
let host = url::Url::parse(&core_config().host)
|
||||
.context("Failed to parse config field 'host' as URL")
|
||||
.unwrap()
|
||||
.host()
|
||||
.context(
|
||||
"Failed to parse config field 'host' | missing host part",
|
||||
)
|
||||
.unwrap()
|
||||
.to_string();
|
||||
format!("core={}", urlencoding::encode(&host))
|
||||
})
|
||||
}
|
||||
|
||||
pub fn periphery_public_keys() -> Option<&'static [SpkiPublicKey]> {
|
||||
static PERIPHERY_PUBLIC_KEYS: OnceLock<Option<Vec<SpkiPublicKey>>> =
|
||||
OnceLock::new();
|
||||
PERIPHERY_PUBLIC_KEYS
|
||||
.get_or_init(|| {
|
||||
core_config().periphery_public_keys.as_ref().map(
|
||||
|public_keys| {
|
||||
public_keys
|
||||
.iter()
|
||||
.flat_map(|public_key| {
|
||||
let (path, maybe_pem) = if let Some(path) =
|
||||
public_key.strip_prefix("file:")
|
||||
{
|
||||
match std::fs::read_to_string(path).with_context(
|
||||
|| format!("Failed to read periphery public key at {path:?}"),
|
||||
) {
|
||||
Ok(public_key) => (Some(path), public_key),
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
return None;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
(None, public_key.clone())
|
||||
};
|
||||
match SpkiPublicKey::from_maybe_pem(&maybe_pem) {
|
||||
Ok(public_key) => Some(public_key),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to read periphery public key{} | {e:#}",
|
||||
if let Some(path) = path {
|
||||
format!("at {path:?}")
|
||||
} else {
|
||||
String::new()
|
||||
}
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
},
|
||||
)
|
||||
})
|
||||
.as_deref()
|
||||
}
|
||||
|
||||
pub fn core_config() -> &'static CoreConfig {
|
||||
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
|
||||
@@ -88,9 +176,11 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
// recreating CoreConfig here makes sure apply all env overrides applied.
|
||||
CoreConfig {
|
||||
// Secret things overridden with file
|
||||
jwt_secret: maybe_read_item_from_file(env.komodo_jwt_secret_file, env.komodo_jwt_secret).unwrap_or(config.jwt_secret),
|
||||
private_key: maybe_read_item_from_file(env.komodo_private_key_file, env.komodo_private_key)
|
||||
.unwrap_or(config.private_key),
|
||||
passkey: maybe_read_item_from_file(env.komodo_passkey_file, env.komodo_passkey)
|
||||
.unwrap_or(config.passkey),
|
||||
.or(config.passkey),
|
||||
jwt_secret: maybe_read_item_from_file(env.komodo_jwt_secret_file, env.komodo_jwt_secret).unwrap_or(config.jwt_secret),
|
||||
webhook_secret: maybe_read_item_from_file(env.komodo_webhook_secret_file, env.komodo_webhook_secret)
|
||||
.unwrap_or(config.webhook_secret),
|
||||
database: DatabaseConfig {
|
||||
@@ -177,8 +267,9 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
port: env.komodo_port.unwrap_or(config.port),
|
||||
bind_ip: env.komodo_bind_ip.unwrap_or(config.bind_ip),
|
||||
timezone: env.komodo_timezone.unwrap_or(config.timezone),
|
||||
first_server: env.komodo_first_server.or(config.first_server),
|
||||
first_server_name: env.komodo_first_server_name.unwrap_or(config.first_server_name),
|
||||
periphery_public_keys: env.komodo_periphery_public_keys.or(config.periphery_public_keys),
|
||||
first_server_address: env.komodo_first_server_address.or(config.first_server_address),
|
||||
first_server_name: env.komodo_first_server_name.or(config.first_server_name),
|
||||
frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path),
|
||||
jwt_ttl: env
|
||||
.komodo_jwt_ttl
|
||||
@@ -250,6 +341,7 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.unwrap_or(config.logging.opentelemetry_service_name),
|
||||
},
|
||||
pretty_startup_config: env.komodo_pretty_startup_config.unwrap_or(config.pretty_startup_config),
|
||||
unsafe_unsanitized_startup_config: env.komodo_unsafe_unsanitized_startup_config.unwrap_or(config.unsafe_unsanitized_startup_config),
|
||||
internet_interface: env.komodo_internet_interface.unwrap_or(config.internet_interface),
|
||||
ssl_enabled: env.komodo_ssl_enabled.unwrap_or(config.ssl_enabled),
|
||||
ssl_key_file: env.komodo_ssl_key_file.unwrap_or(config.ssl_key_file),
|
||||
|
||||
186
bin/core/src/connection/client.rs
Normal file
186
bin/core/src/connection/client.rs
Normal file
@@ -0,0 +1,186 @@
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use periphery_client::CONNECTION_RETRY_SECONDS;
|
||||
use serror::{deserialize_error_bytes, serialize_error_bytes};
|
||||
use transport::{
|
||||
MessageState,
|
||||
auth::{
|
||||
AddressConnectionIdentifiers, ClientLoginFlow,
|
||||
ConnectionIdentifiers,
|
||||
},
|
||||
fix_ws_address,
|
||||
websocket::{Websocket, tungstenite::TungsteniteWebsocket},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::{core_config, core_connection_query},
|
||||
periphery::ConnectionChannels,
|
||||
state::periphery_connections,
|
||||
};
|
||||
|
||||
use super::{PeripheryConnection, PeripheryConnectionArgs};
|
||||
|
||||
impl PeripheryConnectionArgs<'_> {
|
||||
pub async fn spawn_client_connection(
|
||||
self,
|
||||
id: String,
|
||||
insecure: bool,
|
||||
passkey: String,
|
||||
) -> anyhow::Result<Arc<ConnectionChannels>> {
|
||||
let Some(address) = self.address else {
|
||||
return Err(anyhow!(
|
||||
"Cannot spawn client connection with empty address"
|
||||
));
|
||||
};
|
||||
|
||||
let address = fix_ws_address(address);
|
||||
let identifiers =
|
||||
AddressConnectionIdentifiers::extract(&address)?;
|
||||
let endpoint = format!("{address}/?{}", core_connection_query());
|
||||
|
||||
let (connection, mut receiver) =
|
||||
periphery_connections().insert(id.clone(), self).await;
|
||||
|
||||
let channels = connection.channels.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let ws = tokio::select! {
|
||||
ws = TungsteniteWebsocket::connect_maybe_tls_insecure(
|
||||
&endpoint,
|
||||
insecure && endpoint.starts_with("wss"),
|
||||
) => ws,
|
||||
_ = connection.cancel.cancelled() => {
|
||||
break
|
||||
}
|
||||
};
|
||||
|
||||
let (mut socket, accept) = match ws {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
connection.set_error(e.error).await;
|
||||
tokio::time::sleep(Duration::from_secs(
|
||||
CONNECTION_RETRY_SECONDS,
|
||||
))
|
||||
.await;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let identifiers = identifiers.build(
|
||||
accept.as_bytes(),
|
||||
core_connection_query().as_bytes(),
|
||||
);
|
||||
|
||||
if let Err(e) = connection
|
||||
.client_login(&mut socket, identifiers, &passkey)
|
||||
.await
|
||||
{
|
||||
connection.set_error(e).await;
|
||||
tokio::time::sleep(Duration::from_secs(
|
||||
CONNECTION_RETRY_SECONDS,
|
||||
))
|
||||
.await;
|
||||
continue;
|
||||
};
|
||||
|
||||
connection.handle_socket(socket, &mut receiver).await
|
||||
}
|
||||
});
|
||||
|
||||
Ok(channels)
|
||||
}
|
||||
}
|
||||
|
||||
impl PeripheryConnection {
|
||||
/// Custom Core -> Periphery side only login wrapper
|
||||
/// to implement passkey support for backward compatibility
|
||||
async fn client_login(
|
||||
&self,
|
||||
socket: &mut TungsteniteWebsocket,
|
||||
identifiers: ConnectionIdentifiers<'_>,
|
||||
// for legacy auth
|
||||
passkey: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
// Get the required auth type
|
||||
let bytes = socket
|
||||
.recv_bytes()
|
||||
.with_timeout(Duration::from_secs(2))
|
||||
.await?
|
||||
.context("Failed to receive login type indicator")?;
|
||||
|
||||
match bytes.iter().as_slice() {
|
||||
// Noise auth
|
||||
&[0] => {
|
||||
self
|
||||
.handle_login::<_, ClientLoginFlow>(socket, identifiers)
|
||||
.await
|
||||
}
|
||||
// Passkey auth
|
||||
&[1] => handle_passkey_login(socket, passkey).await,
|
||||
other => Err(anyhow!(
|
||||
"Receieved invalid login type pattern: {other:?}"
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_passkey_login(
|
||||
socket: &mut TungsteniteWebsocket,
|
||||
// for legacy auth
|
||||
passkey: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
let res = async {
|
||||
let mut passkey = if passkey.is_empty() {
|
||||
core_config()
|
||||
.passkey
|
||||
.as_deref()
|
||||
.context("Periphery requires passkey auth")?
|
||||
.as_bytes()
|
||||
.to_vec()
|
||||
} else {
|
||||
passkey.as_bytes().to_vec()
|
||||
};
|
||||
passkey.push(MessageState::Successful.as_byte());
|
||||
|
||||
socket
|
||||
.send(passkey.into())
|
||||
.await
|
||||
.context("Failed to send passkey")?;
|
||||
|
||||
// Receive login state message and return based on value
|
||||
let state_msg = socket
|
||||
.recv_bytes()
|
||||
.await
|
||||
.context("Failed to receive authentication state message")?;
|
||||
let state = state_msg.last().context(
|
||||
"Authentication state message did not contain state byte",
|
||||
)?;
|
||||
match MessageState::from_byte(*state) {
|
||||
MessageState::Successful => anyhow::Ok(()),
|
||||
_ => Err(deserialize_error_bytes(
|
||||
&state_msg[..(state_msg.len() - 1)],
|
||||
)),
|
||||
}
|
||||
}
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
let mut bytes = serialize_error_bytes(&e);
|
||||
bytes.push(MessageState::Failed.as_byte());
|
||||
if let Err(e) = socket
|
||||
.send(bytes.into())
|
||||
.await
|
||||
.context("Failed to send login failed to client")
|
||||
{
|
||||
// Log additional error
|
||||
warn!("{e:#}");
|
||||
}
|
||||
// Close socket
|
||||
let _ = socket.close(None).await;
|
||||
// Return the original error
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
480
bin/core/src/connection/mod.rs
Normal file
480
bin/core/src/connection/mod.rs
Normal file
@@ -0,0 +1,480 @@
|
||||
use std::{
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{self, AtomicBool},
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use bytes::Bytes;
|
||||
use cache::CloneCache;
|
||||
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use komodo_client::entities::{
|
||||
builder::{AwsBuilderConfig, UrlBuilderConfig},
|
||||
optional_str,
|
||||
server::Server,
|
||||
};
|
||||
use serror::serror_into_anyhow_error;
|
||||
use tokio::sync::{
|
||||
RwLock,
|
||||
mpsc::{Sender, error::SendError},
|
||||
};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use transport::{
|
||||
auth::{
|
||||
ConnectionIdentifiers, LoginFlow, LoginFlowArgs,
|
||||
PublicKeyValidator,
|
||||
},
|
||||
bytes::id_from_transport_bytes,
|
||||
channel::{BufferedReceiver, buffered_channel},
|
||||
websocket::{
|
||||
Websocket, WebsocketMessage, WebsocketReceiver as _,
|
||||
WebsocketSender as _,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::{core_private_key, periphery_public_keys},
|
||||
periphery::ConnectionChannels,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
pub mod client;
|
||||
pub mod server;
|
||||
|
||||
/// Registry of all live Periphery connections, keyed by Server id
/// (see `insert`/`get`/`remove` below, which take a `server_id`).
/// Values are shared handles so a connection can be held by both
/// the registry and active request handlers.
#[derive(Default)]
pub struct PeripheryConnections(
  CloneCache<String, Arc<PeripheryConnection>>,
);
|
||||
|
||||
impl PeripheryConnections {
|
||||
/// Insert a recreated connection.
|
||||
/// Ensures the fields which must be persisted between
|
||||
/// connection recreation are carried over.
|
||||
pub async fn insert(
|
||||
&self,
|
||||
server_id: String,
|
||||
args: PeripheryConnectionArgs<'_>,
|
||||
) -> (Arc<PeripheryConnection>, BufferedReceiver<Bytes>) {
|
||||
let (connection, receiver) = if let Some(existing_connection) =
|
||||
self.0.remove(&server_id).await
|
||||
{
|
||||
existing_connection.with_new_args(args)
|
||||
} else {
|
||||
PeripheryConnection::new(args)
|
||||
};
|
||||
|
||||
self.0.insert(server_id, connection.clone()).await;
|
||||
|
||||
(connection, receiver)
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
&self,
|
||||
server_id: &String,
|
||||
) -> Option<Arc<PeripheryConnection>> {
|
||||
self.0.get(server_id).await
|
||||
}
|
||||
|
||||
/// Remove and cancel connection
|
||||
pub async fn remove(
|
||||
&self,
|
||||
server_id: &String,
|
||||
) -> Option<Arc<PeripheryConnection>> {
|
||||
self
|
||||
.0
|
||||
.remove(server_id)
|
||||
.await
|
||||
.inspect(|connection| connection.cancel())
|
||||
}
|
||||
}
|
||||
|
||||
/// The configurable args of a connection.
/// Cheap `Copy` view borrowed from a Server / Builder config;
/// see `OwnedPeripheryConnectionArgs` for the owned form.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct PeripheryConnectionArgs<'a> {
  /// Usually the server id
  pub id: &'a str,
  /// Outbound (Core -> Periphery) address; `None` means the
  /// connection is inbound (Periphery -> Core).
  pub address: Option<&'a str>,
  /// Expected Periphery public key, if explicitly configured.
  periphery_public_key: Option<&'a str>,
}
|
||||
|
||||
impl PublicKeyValidator for PeripheryConnectionArgs<'_> {
|
||||
type ValidationResult = String;
|
||||
async fn validate(
|
||||
&self,
|
||||
public_key: String,
|
||||
) -> anyhow::Result<Self::ValidationResult> {
|
||||
let invalid_error = || {
|
||||
spawn_update_attempted_public_key(
|
||||
self.id.to_string(),
|
||||
Some(public_key.clone()),
|
||||
);
|
||||
let e = anyhow!("{public_key} is invalid")
|
||||
.context(
|
||||
"Ensure public key matches configured Periphery Public Key",
|
||||
)
|
||||
.context("Core failed to validate Periphery public key");
|
||||
e
|
||||
};
|
||||
let core_to_periphery = self.address.is_some();
|
||||
match (self.periphery_public_key, core_to_periphery) {
|
||||
// The key matches expected.
|
||||
(Some(expected), _) if public_key == expected => Ok(public_key),
|
||||
// Explicit auth failed.
|
||||
(Some(_), _) => Err(invalid_error()),
|
||||
// Core -> Periphery connections with no explicit
|
||||
// Periphery public key are not validated.
|
||||
(None, true) => Ok(public_key),
|
||||
// Periphery -> Core connections with no explicit
|
||||
// Periphery public key can fall back to Core config `periphery_public_keys` if defined.
|
||||
(None, false) => {
|
||||
let expected =
|
||||
periphery_public_keys().ok_or_else(invalid_error)?;
|
||||
if expected
|
||||
.iter()
|
||||
.any(|expected| public_key == expected.as_str())
|
||||
{
|
||||
Ok(public_key)
|
||||
} else {
|
||||
Err(invalid_error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> PeripheryConnectionArgs<'a> {
|
||||
pub fn from_server(server: &'a Server) -> Self {
|
||||
Self {
|
||||
id: &server.id,
|
||||
address: optional_str(&server.config.address),
|
||||
periphery_public_key: optional_str(&server.info.public_key),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_url_builder(
|
||||
id: &'a str,
|
||||
config: &'a UrlBuilderConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
address: optional_str(&config.address),
|
||||
periphery_public_key: optional_str(
|
||||
&config.periphery_public_key,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_aws_builder(
|
||||
id: &'a str,
|
||||
address: &'a str,
|
||||
config: &'a AwsBuilderConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
address: Some(address),
|
||||
periphery_public_key: optional_str(
|
||||
&config.periphery_public_key,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_owned(self) -> OwnedPeripheryConnectionArgs {
|
||||
OwnedPeripheryConnectionArgs {
|
||||
id: self.id.to_string(),
|
||||
address: self.address.map(str::to_string),
|
||||
periphery_public_key: self
|
||||
.periphery_public_key
|
||||
.map(str::to_string),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn matches<'b>(
|
||||
self,
|
||||
args: impl Into<PeripheryConnectionArgs<'b>>,
|
||||
) -> bool {
|
||||
self == args.into()
|
||||
}
|
||||
}
|
||||
|
||||
/// Owned counterpart of [`PeripheryConnectionArgs`], stored on the
/// long-lived [`PeripheryConnection`].
#[derive(Debug, Clone)]
pub struct OwnedPeripheryConnectionArgs {
  /// Usually the Server id.
  pub id: String,
  /// Specify outbound connection address.
  /// Inbound connections have this as None
  pub address: Option<String>,
  /// The public key to expect Periphery to have.
  /// If None, must have 'periphery_public_keys' set
  /// in Core config, or will error
  pub periphery_public_key: Option<String>,
}
|
||||
|
||||
impl OwnedPeripheryConnectionArgs {
|
||||
pub fn borrow(&self) -> PeripheryConnectionArgs<'_> {
|
||||
PeripheryConnectionArgs {
|
||||
id: &self.id,
|
||||
address: self.address.as_deref(),
|
||||
periphery_public_key: self.periphery_public_key.as_deref(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Borrowed args convert into the owned form by cloning all fields
// (delegates to `to_owned`).
impl From<PeripheryConnectionArgs<'_>>
  for OwnedPeripheryConnectionArgs
{
  fn from(value: PeripheryConnectionArgs<'_>) -> Self {
    value.to_owned()
  }
}
|
||||
|
||||
// A reference to owned args converts into the borrowed `Copy` form
// without cloning (delegates to `borrow`).
impl<'a> From<&'a OwnedPeripheryConnectionArgs>
  for PeripheryConnectionArgs<'a>
{
  fn from(value: &'a OwnedPeripheryConnectionArgs) -> Self {
    value.borrow()
  }
}
|
||||
|
||||
/// A single Core <-> Periphery connection: the write-side channel,
/// cancellation token, connectivity flag, and the state that must
/// survive connection recreation (error slot, response channels).
#[derive(Debug)]
pub struct PeripheryConnection {
  /// The connection args
  pub args: OwnedPeripheryConnectionArgs,
  /// Send and receive bytes over the connection socket.
  pub sender: Sender<Bytes>,
  /// Cancel the connection
  pub cancel: CancellationToken,
  /// Whether Periphery is currently connected.
  pub connected: AtomicBool,
  // These fields must be maintained if new connection replaces old
  // at the same server id.
  /// Stores latest connection error
  pub error: Arc<RwLock<Option<serror::Serror>>>,
  /// Forward bytes from Periphery to specific channel handlers.
  pub channels: Arc<ConnectionChannels>,
}
|
||||
|
||||
impl PeripheryConnection {
  /// Create a fresh connection with empty error slot and channels.
  /// Returns the shared handle plus the receiving half of the
  /// buffered channel that `handle_socket` drains onto the websocket.
  pub fn new(
    args: impl Into<OwnedPeripheryConnectionArgs>,
  ) -> (Arc<PeripheryConnection>, BufferedReceiver<Bytes>) {
    let (sender, receiever) = buffered_channel();
    (
      PeripheryConnection {
        sender,
        args: args.into(),
        cancel: CancellationToken::new(),
        connected: AtomicBool::new(false),
        error: Default::default(),
        channels: Default::default(),
      }
      .into(),
      receiever,
    )
  }

  /// Recreate this connection with new args, cancelling the old one.
  /// Unlike `new`, the `error` and `channels` Arcs are carried over
  /// so in-flight request handlers keep working across reconnects.
  pub fn with_new_args(
    &self,
    args: impl Into<OwnedPeripheryConnectionArgs>,
  ) -> (Arc<PeripheryConnection>, BufferedReceiver<Bytes>) {
    // Ensure this connection is cancelled.
    self.cancel();
    let (sender, receiever) = buffered_channel();
    (
      PeripheryConnection {
        sender,
        args: args.into(),
        cancel: CancellationToken::new(),
        connected: AtomicBool::new(false),
        error: self.error.clone(),
        channels: self.channels.clone(),
      }
      .into(),
      receiever,
    )
  }

  /// Run the login flow `L` over `socket` using Core's private key,
  /// validating Periphery's public key via this connection's args.
  pub async fn handle_login<W: Websocket, L: LoginFlow>(
    &self,
    socket: &mut W,
    identifiers: ConnectionIdentifiers<'_>,
  ) -> anyhow::Result<()> {
    L::login(LoginFlowArgs {
      socket,
      identifiers,
      private_key: core_private_key(),
      public_key_validator: self.args.borrow(),
    })
    .await?;
    // Clear attempted public key after successful login
    spawn_update_attempted_public_key(self.args.id.clone(), None);
    Ok(())
  }

  /// Drive the socket until cancellation or failure: forwards
  /// outbound bytes from `receiver` to the write half, and routes
  /// inbound binary messages to the response channels. Marks the
  /// connection connected for the duration.
  pub async fn handle_socket<W: Websocket>(
    &self,
    socket: W,
    receiver: &mut BufferedReceiver<Bytes>,
  ) {
    // Child token: cancelling it stops both loops below without
    // cancelling the parent (which outlives this socket).
    let cancel = self.cancel.child_token();

    self.set_connected(true);
    self.clear_error().await;

    let (mut ws_write, mut ws_read) = socket.split();

    let forward_writes = async {
      loop {
        let next = tokio::select! {
          next = receiver.recv() => next,
          _ = cancel.cancelled() => break,
        };

        let message = match next {
          Some(request) => Bytes::copy_from_slice(request),
          // Sender Dropped (shouldn't happen, a reference is held on 'connection').
          None => break,
        };

        match ws_write.send(message).await {
          // Buffer is only cleared once the send succeeded, so an
          // unsent message survives for the next connection attempt.
          Ok(_) => receiver.clear_buffer(),
          Err(e) => {
            self.set_error(e.into()).await;
            break;
          }
        }
      }
      // Cancel again if not already
      let _ = ws_write.close(None).await;
      cancel.cancel();
    };

    let handle_reads = async {
      loop {
        let next = tokio::select! {
          next = ws_read.recv() => next,
          _ = cancel.cancelled() => break,
        };

        match next {
          Ok(WebsocketMessage::Binary(bytes)) => {
            self.handle_incoming_bytes(bytes).await
          }
          Ok(WebsocketMessage::Close(_))
          | Ok(WebsocketMessage::Closed) => {
            self.set_error(anyhow!("Connection closed")).await;
            break;
          }
          // Note: receive errors are recorded but do not break
          // the loop; only Close/Closed or cancellation ends it.
          Err(e) => {
            self.set_error(e.into()).await;
          }
        };
      }
      // Cancel again if not already
      cancel.cancel();
    };

    // Run both directions concurrently; either side cancelling the
    // child token unwinds the other.
    tokio::join!(forward_writes, handle_reads);

    self.set_connected(false);
  }

  /// Route one inbound message to the response channel matching the
  /// id encoded in its transport bytes. Failures are logged, not
  /// propagated.
  pub async fn handle_incoming_bytes(&self, bytes: Bytes) {
    let id = match id_from_transport_bytes(&bytes) {
      Ok(res) => res,
      Err(e) => {
        // TODO: handle better
        warn!("Failed to read id | {e:#}");
        return;
      }
    };
    let Some(channel) = self.channels.get(&id).await else {
      // TODO: handle better
      debug!("Failed to send response | No response channel found");
      return;
    };
    if let Err(e) = channel.send(bytes).await {
      // TODO: handle better
      warn!("Failed to send response | Channel failure | {e:#}");
    }
  }

  /// Queue bytes for sending over the socket write loop.
  pub async fn send(
    &self,
    value: Bytes,
  ) -> Result<(), SendError<Bytes>> {
    self.sender.send(value).await
  }

  /// Set the connected flag (Relaxed ordering: it is a standalone
  /// status flag, not used to synchronize other memory).
  pub fn set_connected(&self, connected: bool) {
    self.connected.store(connected, atomic::Ordering::Relaxed);
  }

  /// Whether Periphery is currently connected.
  pub fn connected(&self) -> bool {
    self.connected.load(atomic::Ordering::Relaxed)
  }

  /// Polls connected 3 times (500ms in between) before bailing.
  /// On bail, returns the stored connection error if one exists,
  /// otherwise a generic "not connected" error.
  pub async fn bail_if_not_connected(&self) -> anyhow::Result<()> {
    const POLL_TIMES: usize = 3;
    for i in 0..POLL_TIMES {
      if self.connected() {
        return Ok(());
      }
      // No sleep after the final check.
      if i < POLL_TIMES - 1 {
        tokio::time::sleep(Duration::from_millis(500)).await;
      }
    }
    if let Some(e) = self.error().await {
      Err(serror_into_anyhow_error(e))
    } else {
      Err(anyhow!("Server is not currently connected"))
    }
  }

  /// Clone out the latest stored connection error, if any.
  pub async fn error(&self) -> Option<serror::Serror> {
    self.error.read().await.clone()
  }

  /// Replace the stored connection error.
  pub async fn set_error(&self, e: anyhow::Error) {
    let mut error = self.error.write().await;
    *error = Some(e.into());
  }

  /// Clear the stored connection error.
  pub async fn clear_error(&self) {
    let mut error = self.error.write().await;
    *error = None;
  }

  /// Cancel this connection (and all child tokens in handle_socket).
  pub fn cancel(&self) {
    self.cancel.cancel();
  }
}
|
||||
|
||||
/// Spawn task to set the 'attempted_public_key'
|
||||
/// for easy manual connection acceptance later on.
|
||||
fn spawn_update_attempted_public_key(
|
||||
id: String,
|
||||
public_key: impl Into<Option<String>>,
|
||||
) {
|
||||
let public_key = public_key.into();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db_client().servers,
|
||||
&id,
|
||||
doc! {
|
||||
"$set": {
|
||||
"info.attempted_public_key": &public_key.as_deref().unwrap_or_default(),
|
||||
}
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to update attempted public_key for Server {id} | {e:?}"
|
||||
);
|
||||
};
|
||||
});
|
||||
}
|
||||
349
bin/core/src/connection/server.rs
Normal file
349
bin/core/src/connection/server.rs
Normal file
@@ -0,0 +1,349 @@
|
||||
use std::{str::FromStr, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
extract::{Query, WebSocketUpgrade},
|
||||
http::{HeaderMap, StatusCode},
|
||||
response::Response,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{
|
||||
api::write::{CreateBuilder, CreateServer, UpdateResourceMeta},
|
||||
entities::{
|
||||
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
|
||||
onboarding_key::OnboardingKey,
|
||||
server::{PartialServerConfig, Server},
|
||||
user::system_user,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCode, AddStatusCodeError};
|
||||
use transport::{
|
||||
MessageState, PeripheryConnectionQuery,
|
||||
auth::{
|
||||
HeaderConnectionIdentifiers, LoginFlow, LoginFlowArgs,
|
||||
PublicKeyValidator, ServerLoginFlow,
|
||||
},
|
||||
websocket::{Websocket, axum::AxumWebsocket},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
config::core_private_key,
|
||||
helpers::query::id_or_name_filter,
|
||||
resource::KomodoResource,
|
||||
state::{db_client, periphery_connections},
|
||||
};
|
||||
|
||||
use super::PeripheryConnectionArgs;
|
||||
|
||||
pub async fn handler(
|
||||
Query(PeripheryConnectionQuery {
|
||||
server: server_query,
|
||||
}): Query<PeripheryConnectionQuery>,
|
||||
mut headers: HeaderMap,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
let identifiers =
|
||||
HeaderConnectionIdentifiers::extract(&mut headers)
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
|
||||
if server_query.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Must provide non-empty server specifier")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
);
|
||||
}
|
||||
|
||||
// Handle connection vs. onboarding flow.
|
||||
match Server::coll()
|
||||
.find_one(id_or_name_filter(&server_query))
|
||||
.await
|
||||
.context("Failed to query database for Server")?
|
||||
{
|
||||
Some(server) => {
|
||||
existing_server_handler(server_query, server, identifiers, ws)
|
||||
.await
|
||||
}
|
||||
None if ObjectId::from_str(&server_query).is_err() => {
|
||||
onboard_server_handler(server_query, identifiers, ws).await
|
||||
}
|
||||
None => Err(
|
||||
anyhow!("Must provide name based Server specifier for onboarding flow, name cannot be valid ObjectId (hex)")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
async fn existing_server_handler(
|
||||
server_query: String,
|
||||
server: Server,
|
||||
identifiers: HeaderConnectionIdentifiers,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
if !server.config.enabled {
|
||||
return Err(anyhow!("Server is Disabled."))
|
||||
.status_code(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
if !server.config.address.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Server is configured to use a Core -> Periphery connection."
|
||||
))
|
||||
.status_code(StatusCode::BAD_REQUEST);
|
||||
}
|
||||
|
||||
let connections = periphery_connections();
|
||||
|
||||
// Ensure connected server can't get bumped off the connection.
|
||||
// Treat this as authorization issue.
|
||||
if let Some(existing_connection) = connections.get(&server.id).await
|
||||
&& existing_connection.connected()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("A Server '{server_query}' is already connected")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
);
|
||||
}
|
||||
|
||||
let (connection, mut receiver) = periphery_connections()
|
||||
.insert(
|
||||
server.id.clone(),
|
||||
PeripheryConnectionArgs::from_server(&server),
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(ws.on_upgrade(|socket| async move {
|
||||
let query =
|
||||
format!("server={}", urlencoding::encode(&server_query));
|
||||
let mut socket = AxumWebsocket(socket);
|
||||
|
||||
if let Err(e) = socket.send(Bytes::from_owner([0])).await.context(
|
||||
"Failed to send the login flow indicator over connnection",
|
||||
) {
|
||||
connection.set_error(e).await;
|
||||
return;
|
||||
};
|
||||
|
||||
if let Err(e) = connection
|
||||
.handle_login::<_, ServerLoginFlow>(
|
||||
&mut socket,
|
||||
identifiers.build(query.as_bytes()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
connection.set_error(e).await;
|
||||
return;
|
||||
}
|
||||
|
||||
connection.handle_socket(socket, &mut receiver).await
|
||||
}))
|
||||
}
|
||||
|
||||
async fn onboard_server_handler(
|
||||
server_query: String,
|
||||
identifiers: HeaderConnectionIdentifiers,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
Ok(ws.on_upgrade(|socket| async move {
|
||||
let query =
|
||||
format!("server={}", urlencoding::encode(&server_query));
|
||||
let mut socket = AxumWebsocket(socket);
|
||||
|
||||
if let Err(e) = socket.send(Bytes::from_owner([1])).await.context(
|
||||
"Failed to send the login flow indicator over connnection",
|
||||
).context("Server onboarding error") {
|
||||
warn!("{e:#}");
|
||||
return;
|
||||
};
|
||||
|
||||
let onboarding_key = match ServerLoginFlow::login(LoginFlowArgs {
|
||||
socket: &mut socket,
|
||||
identifiers: identifiers.build(query.as_bytes()),
|
||||
private_key: core_private_key(),
|
||||
public_key_validator: CreationKeyValidator,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(onboarding_key) => onboarding_key,
|
||||
Err(e) => {
|
||||
debug!("Server {server_query} failed to onboard | {e:#}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let res = socket
|
||||
.recv_bytes()
|
||||
.with_timeout(Duration::from_secs(2))
|
||||
.await
|
||||
.and_then(|res| {
|
||||
res.and_then(|public_key_bytes| {
|
||||
String::from_utf8(public_key_bytes.into())
|
||||
.context("Public key bytes are not valid utf8")
|
||||
})
|
||||
});
|
||||
|
||||
// Post onboarding login 1: Receive public key
|
||||
let public_key = match res
|
||||
{
|
||||
Ok(public_key) => public_key,
|
||||
Err(e) => {
|
||||
warn!("Server {server_query} failed to onboard | failed to receive Server public key | {e:#}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
let server_id = match create_server_maybe_builder(
|
||||
server_query,
|
||||
public_key,
|
||||
onboarding_key.copy_server,
|
||||
onboarding_key.tags,
|
||||
onboarding_key.create_builder
|
||||
).await {
|
||||
Ok(server_id) => server_id,
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
if let Err(e) = socket
|
||||
.send_error(&e)
|
||||
.await
|
||||
.context("Failed to send Server creation failed to client")
|
||||
{
|
||||
// Log additional error
|
||||
warn!("{e:#}");
|
||||
}
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = socket
|
||||
.send(MessageState::Successful.into())
|
||||
.await
|
||||
.context("Failed to send Server creation successful to client")
|
||||
{
|
||||
// Log additional error
|
||||
warn!("{e:#}");
|
||||
}
|
||||
|
||||
// Server created, close and trigger reconnect
|
||||
// and handling using existing server handler.
|
||||
let _ = socket.close(None).await;
|
||||
|
||||
// Add the server to onboarding key "Onboarded"
|
||||
let res = db_client()
|
||||
.onboarding_keys
|
||||
.update_one(
|
||||
doc! { "public_key": &onboarding_key.public_key },
|
||||
doc! { "$push": { "onboarded": server_id } },
|
||||
).await;
|
||||
if let Err(e) = res {
|
||||
warn!("Failed to update onboarding key 'onboarded' | {e:?}");
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
async fn create_server_maybe_builder(
|
||||
server_query: String,
|
||||
public_key: String,
|
||||
copy_server: String,
|
||||
tags: Vec<String>,
|
||||
create_builder: bool,
|
||||
) -> anyhow::Result<String> {
|
||||
let config = if copy_server.is_empty() {
|
||||
PartialServerConfig {
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
}
|
||||
} else {
|
||||
let config = match db_client().servers.find_one(id_or_name_filter(©_server)).await {
|
||||
Ok(Some(server)) => server.config,
|
||||
Ok(None) => {
|
||||
warn!("Server onboarding: Failed to find Server {}", copy_server);
|
||||
Default::default()
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to query database for onboarding key 'copy_server' | {e:?}");
|
||||
Default::default()
|
||||
}
|
||||
};
|
||||
PartialServerConfig {
|
||||
enabled: Some(true),
|
||||
address: None,
|
||||
..config.into()
|
||||
}
|
||||
};
|
||||
|
||||
let args = WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
};
|
||||
|
||||
let server = CreateServer {
|
||||
name: server_query.clone(),
|
||||
config,
|
||||
public_key: Some(public_key),
|
||||
}
|
||||
.resolve(&args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Server onboarding flow failed at Server creation")?;
|
||||
|
||||
// Don't need to fail, only warn on this
|
||||
if let Err(e) = (UpdateResourceMeta {
|
||||
target: (&server).into(),
|
||||
tags: Some(tags),
|
||||
description: None,
|
||||
template: None,
|
||||
})
|
||||
.resolve(&args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Server onboarding flow failed at Server creation")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
};
|
||||
|
||||
if create_builder {
|
||||
// Don't need to fail, only warn on this
|
||||
if let Err(e) = (CreateBuilder {
|
||||
name: server_query,
|
||||
config: PartialBuilderConfig::Server(
|
||||
PartialServerBuilderConfig {
|
||||
server_id: Some(server.id.clone()),
|
||||
},
|
||||
),
|
||||
})
|
||||
.resolve(&args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Server onboarding flow failed at Builder creation")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
};
|
||||
}
|
||||
|
||||
Ok(server.id)
|
||||
}
|
||||
|
||||
/// PublicKeyValidator matching presented keys against the stored
/// `onboarding_keys` collection during Server onboarding.
struct CreationKeyValidator;
|
||||
|
||||
impl PublicKeyValidator for CreationKeyValidator {
|
||||
type ValidationResult = OnboardingKey;
|
||||
async fn validate(
|
||||
&self,
|
||||
public_key: String,
|
||||
) -> anyhow::Result<Self::ValidationResult> {
|
||||
let onboarding_key = db_client()
|
||||
.onboarding_keys
|
||||
.find_one(doc! { "public_key": &public_key })
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context("Matching Server onboarding key not found")?;
|
||||
if onboarding_key.enabled {
|
||||
Ok(onboarding_key)
|
||||
} else {
|
||||
Err(anyhow!("Onboarding key is disabled"))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use cache::CloneCache;
|
||||
use komodo_client::{
|
||||
busy::Busy,
|
||||
entities::{
|
||||
@@ -12,21 +13,19 @@ use komodo_client::{
|
||||
},
|
||||
};
|
||||
|
||||
use super::cache::Cache;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ActionStates {
|
||||
pub build: Cache<String, Arc<ActionState<BuildActionState>>>,
|
||||
pub server: CloneCache<String, Arc<ActionState<ServerActionState>>>,
|
||||
pub stack: CloneCache<String, Arc<ActionState<StackActionState>>>,
|
||||
pub deployment:
|
||||
Cache<String, Arc<ActionState<DeploymentActionState>>>,
|
||||
pub server: Cache<String, Arc<ActionState<ServerActionState>>>,
|
||||
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
|
||||
CloneCache<String, Arc<ActionState<DeploymentActionState>>>,
|
||||
pub build: CloneCache<String, Arc<ActionState<BuildActionState>>>,
|
||||
pub repo: CloneCache<String, Arc<ActionState<RepoActionState>>>,
|
||||
pub procedure:
|
||||
Cache<String, Arc<ActionState<ProcedureActionState>>>,
|
||||
pub action: Cache<String, Arc<ActionState<ActionActionState>>>,
|
||||
pub resource_sync:
|
||||
Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
|
||||
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
|
||||
CloneCache<String, Arc<ActionState<ProcedureActionState>>>,
|
||||
pub action: CloneCache<String, Arc<ActionState<ActionActionState>>>,
|
||||
pub sync:
|
||||
CloneCache<String, Arc<ActionState<ResourceSyncActionState>>>,
|
||||
}
|
||||
|
||||
/// Need to be able to check "busy" with write lock acquired.
|
||||
@@ -62,17 +61,33 @@ impl<States: Default + Busy + Copy + Send + 'static>
|
||||
/// Returns a guard that returns the states to default (not busy) when dropped.
|
||||
pub fn update(
|
||||
&self,
|
||||
handler: impl Fn(&mut States),
|
||||
update_fn: impl Fn(&mut States),
|
||||
) -> anyhow::Result<UpdateGuard<'_, States>> {
|
||||
self.update_custom(
|
||||
update_fn,
|
||||
|states| *states = Default::default(),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
/// Will acquire lock, optionally check busy, and if not will
|
||||
/// run the provided update function on the states.
|
||||
/// Returns a guard that calls the provided return_fn when dropped.
|
||||
pub fn update_custom(
|
||||
&self,
|
||||
update_fn: impl Fn(&mut States),
|
||||
return_fn: impl Fn(&mut States) + Send + 'static,
|
||||
busy_check: bool,
|
||||
) -> anyhow::Result<UpdateGuard<'_, States>> {
|
||||
let mut lock = self
|
||||
.0
|
||||
.lock()
|
||||
.map_err(|e| anyhow!("action state lock poisoned | {e:?}"))?;
|
||||
if lock.busy() {
|
||||
return Err(anyhow!("resource is busy"));
|
||||
.map_err(|e| anyhow!("Action state lock poisoned | {e:?}"))?;
|
||||
if busy_check && lock.busy() {
|
||||
return Err(anyhow!("Resource is busy"));
|
||||
}
|
||||
handler(&mut *lock);
|
||||
Ok(UpdateGuard(&self.0))
|
||||
update_fn(&mut *lock);
|
||||
Ok(UpdateGuard(&self.0, Box::new(return_fn)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,6 +97,7 @@ impl<States: Default + Busy + Copy + Send + 'static>
|
||||
/// user could drop UpdateGuard.
|
||||
pub struct UpdateGuard<'a, States: Default + Send + 'static>(
|
||||
&'a Mutex<States>,
|
||||
Box<dyn Fn(&mut States) + Send>,
|
||||
);
|
||||
|
||||
impl<States: Default + Send + 'static> Drop
|
||||
@@ -95,6 +111,6 @@ impl<States: Default + Send + 'static> Drop
|
||||
return;
|
||||
}
|
||||
};
|
||||
*lock = States::default();
|
||||
self.1(&mut *lock);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::oid::ObjectId;
|
||||
use formatting::muted;
|
||||
use komodo_client::entities::{
|
||||
Version,
|
||||
@@ -9,10 +10,7 @@ use komodo_client::entities::{
|
||||
server::Server,
|
||||
update::{Log, Update},
|
||||
};
|
||||
use periphery_client::{
|
||||
PeripheryClient,
|
||||
api::{self, GetVersionResponse},
|
||||
};
|
||||
use periphery_client::api::{self, GetVersionResponse};
|
||||
|
||||
use crate::{
|
||||
cloud::{
|
||||
@@ -22,8 +20,9 @@ use crate::{
|
||||
terminate_ec2_instance_with_retry,
|
||||
},
|
||||
},
|
||||
config::core_config,
|
||||
connection::PeripheryConnectionArgs,
|
||||
helpers::update::update_update,
|
||||
periphery::PeripheryClient,
|
||||
resource,
|
||||
};
|
||||
|
||||
@@ -47,27 +46,29 @@ pub async fn get_builder_periphery(
|
||||
"Builder has not yet configured an address"
|
||||
));
|
||||
}
|
||||
// TODO: Dont use builder id, or will be problems
|
||||
// with simultaneous spawned builders.
|
||||
let periphery = PeripheryClient::new(
|
||||
config.address,
|
||||
if config.passkey.is_empty() {
|
||||
core_config().passkey.clone()
|
||||
} else {
|
||||
config.passkey
|
||||
},
|
||||
Duration::from_secs(3),
|
||||
);
|
||||
PeripheryConnectionArgs::from_url_builder(
|
||||
&ObjectId::new().to_hex(),
|
||||
&config,
|
||||
),
|
||||
config.insecure_tls,
|
||||
&config.passkey,
|
||||
)
|
||||
.await?;
|
||||
periphery
|
||||
.health_check()
|
||||
.await
|
||||
.context("Url Builder failed health check")?;
|
||||
Ok((periphery, BuildCleanupData::Server))
|
||||
Ok((periphery, BuildCleanupData::Url))
|
||||
}
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
return Err(anyhow!("Builder has not configured a server"));
|
||||
}
|
||||
let server = resource::get::<Server>(&config.server_id).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
Ok((periphery, BuildCleanupData::Server))
|
||||
}
|
||||
BuilderConfig::Aws(config) => {
|
||||
@@ -90,10 +91,8 @@ async fn get_aws_builder(
|
||||
let Ec2Instance { instance_id, ip } =
|
||||
launch_ec2_instance(&instance_name, &config).await?;
|
||||
|
||||
info!("ec2 instance launched");
|
||||
|
||||
let log = Log {
|
||||
stage: "start build instance".to_string(),
|
||||
stage: "Start Build Instance".to_string(),
|
||||
success: true,
|
||||
stdout: start_aws_builder_log(&instance_id, &ip, &config),
|
||||
start_ts: start_create_ts,
|
||||
@@ -105,14 +104,21 @@ async fn get_aws_builder(
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let protocol = if config.use_https { "https" } else { "http" };
|
||||
let protocol = if config.use_https { "wss" } else { "ws" };
|
||||
|
||||
// TODO: Handle ad-hoc (non server) periphery connections. These don't have ids.
|
||||
let periphery_address =
|
||||
format!("{protocol}://{ip}:{}", config.port);
|
||||
let periphery = PeripheryClient::new(
|
||||
&periphery_address,
|
||||
&core_config().passkey,
|
||||
Duration::from_secs(3),
|
||||
);
|
||||
PeripheryConnectionArgs::from_aws_builder(
|
||||
&ObjectId::new().to_hex(),
|
||||
&periphery_address,
|
||||
&config,
|
||||
),
|
||||
config.insecure_tls,
|
||||
"",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let start_connect_ts = komodo_timestamp();
|
||||
let mut res = Ok(GetVersionResponse {
|
||||
@@ -166,6 +172,7 @@ async fn get_aws_builder(
|
||||
|
||||
#[instrument(skip(update))]
|
||||
pub async fn cleanup_builder_instance(
|
||||
periphery: PeripheryClient,
|
||||
cleanup_data: BuildCleanupData,
|
||||
update: &mut Update,
|
||||
) {
|
||||
@@ -173,10 +180,14 @@ pub async fn cleanup_builder_instance(
|
||||
BuildCleanupData::Server => {
|
||||
// Nothing to clean up
|
||||
}
|
||||
BuildCleanupData::Url => {
|
||||
periphery.cleanup().await;
|
||||
}
|
||||
BuildCleanupData::Aws {
|
||||
instance_id,
|
||||
region,
|
||||
} => {
|
||||
periphery.cleanup().await;
|
||||
let _instance_id = instance_id.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ =
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
use std::{collections::HashMap, hash::Hash};
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Cache<K: PartialEq + Eq + Hash, T: Clone + Default> {
|
||||
cache: RwLock<HashMap<K, T>>,
|
||||
}
|
||||
|
||||
impl<
|
||||
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
|
||||
T: Clone + Default,
|
||||
> Cache<K, T>
|
||||
{
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get(&self, key: &K) -> Option<T> {
|
||||
self.cache.read().await.get(key).cloned()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_or_insert_default(&self, key: &K) -> T {
|
||||
let mut lock = self.cache.write().await;
|
||||
match lock.get(key).cloned() {
|
||||
Some(item) => item,
|
||||
None => {
|
||||
let item: T = Default::default();
|
||||
lock.insert(key.clone(), item.clone());
|
||||
item
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_list(&self) -> Vec<T> {
|
||||
let cache = self.cache.read().await;
|
||||
cache.values().cloned().collect()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn insert<Key>(&self, key: Key, val: T)
|
||||
where
|
||||
T: std::fmt::Debug,
|
||||
Key: Into<K> + std::fmt::Debug,
|
||||
{
|
||||
self.cache.write().await.insert(key.into(), val);
|
||||
}
|
||||
|
||||
// #[instrument(level = "debug", skip(self, handler))]
|
||||
// pub async fn update_entry<Key>(
|
||||
// &self,
|
||||
// key: Key,
|
||||
// handler: impl Fn(&mut T),
|
||||
// ) where
|
||||
// Key: Into<K> + std::fmt::Debug,
|
||||
// {
|
||||
// let mut cache = self.cache.write().await;
|
||||
// handler(cache.entry(key.into()).or_default());
|
||||
// }
|
||||
|
||||
// #[instrument(level = "debug", skip(self))]
|
||||
// pub async fn clear(&self) {
|
||||
// self.cache.write().await.clear();
|
||||
// }
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn remove(&self, key: &K) {
|
||||
self.cache.write().await.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
// impl<
|
||||
// K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
|
||||
// T: Clone + Default + Busy,
|
||||
// > Cache<K, T>
|
||||
// {
|
||||
// #[instrument(level = "debug", skip(self))]
|
||||
// pub async fn busy(&self, id: &K) -> bool {
|
||||
// match self.get(id).await {
|
||||
// Some(state) => state.busy(),
|
||||
// None => false,
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{fmt::Write, time::Duration};
|
||||
use std::fmt::Write;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::Document;
|
||||
@@ -15,15 +15,16 @@ use komodo_client::entities::{
|
||||
stack::Stack,
|
||||
user::User,
|
||||
};
|
||||
use periphery_client::PeripheryClient;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
use crate::{
|
||||
config::core_config, connection::PeripheryConnectionArgs,
|
||||
periphery::PeripheryClient, state::db_client,
|
||||
};
|
||||
|
||||
pub mod action_state;
|
||||
pub mod all_resources;
|
||||
pub mod builder;
|
||||
pub mod cache;
|
||||
pub mod channel;
|
||||
pub mod maintenance;
|
||||
pub mod matcher;
|
||||
@@ -185,24 +186,18 @@ pub async fn registry_token(
|
||||
|
||||
//
|
||||
|
||||
pub fn periphery_client(
|
||||
pub async fn periphery_client(
|
||||
server: &Server,
|
||||
) -> anyhow::Result<PeripheryClient> {
|
||||
if !server.config.enabled {
|
||||
return Err(anyhow!("server not enabled"));
|
||||
}
|
||||
|
||||
let client = PeripheryClient::new(
|
||||
&server.config.address,
|
||||
if server.config.passkey.is_empty() {
|
||||
&core_config().passkey
|
||||
} else {
|
||||
&server.config.passkey
|
||||
},
|
||||
Duration::from_secs(server.config.timeout_seconds as u64),
|
||||
);
|
||||
|
||||
Ok(client)
|
||||
PeripheryClient::new(
|
||||
PeripheryConnectionArgs::from_server(server),
|
||||
server.config.insecure_tls,
|
||||
&server.config.passkey,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
|
||||
@@ -1101,6 +1101,23 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::RunStackService(req) => {
|
||||
let req = ExecuteRequest::RunStackService(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::RunStackService(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at RunStackService"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::BatchDestroyStack(_) => {
|
||||
// All batch executions must be expanded in `execute_stage`
|
||||
return Err(anyhow!(
|
||||
@@ -1124,6 +1141,23 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::SendAlert(req) => {
|
||||
let req = ExecuteRequest::SendAlert(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::SendAlert(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at SendAlert"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::ClearRepoCache(req) => {
|
||||
let req = ExecuteRequest::ClearRepoCache(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
@@ -1175,6 +1209,23 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::RotateAllServerKeys(req) => {
|
||||
let req = ExecuteRequest::RotateAllServerKeys(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
let ExecuteRequest::RotateAllServerKeys(req) = req else {
|
||||
unreachable!()
|
||||
};
|
||||
let update_id = update.id.clone();
|
||||
handle_resolve_result(
|
||||
req
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Failed at RotateAllServerKeys"),
|
||||
&update_id,
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::Sleep(req) => {
|
||||
let duration = Duration::from_millis(req.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
|
||||
@@ -4,7 +4,7 @@ use async_timing_util::{
|
||||
};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use futures::{StreamExt, stream::FuturesUnordered};
|
||||
use periphery_client::api::image::PruneImages;
|
||||
use periphery_client::api::docker::PruneImages;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
@@ -41,7 +41,10 @@ async fn prune_images() -> anyhow::Result<()> {
|
||||
.map(|server| async move {
|
||||
(
|
||||
async {
|
||||
periphery_client(&server)?.request(PruneImages {}).await
|
||||
periphery_client(&server)
|
||||
.await?
|
||||
.request(PruneImages {})
|
||||
.await
|
||||
}
|
||||
.await,
|
||||
server,
|
||||
@@ -51,8 +54,8 @@ async fn prune_images() -> anyhow::Result<()> {
|
||||
|
||||
while let Some((res, server)) = futures.next().await {
|
||||
if let Err(e) = res {
|
||||
error!(
|
||||
"failed to prune images on server {} ({}) | {e:#}",
|
||||
warn!(
|
||||
"failed to prune images on Server {} ({}) | {e:#}",
|
||||
server.name, server.id
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,11 +1,6 @@
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
str::FromStr,
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use async_timing_util::{ONE_MIN_MS, unix_timestamp_ms};
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
@@ -13,29 +8,31 @@ use database::mungos::{
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant,
|
||||
action::{Action, ActionState},
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::{Deployment, DeploymentState},
|
||||
docker::container::{ContainerListItem, ContainerStateStatusEnum},
|
||||
permission::{PermissionLevel, PermissionLevelAndSpecifics},
|
||||
procedure::{Procedure, ProcedureState},
|
||||
repo::Repo,
|
||||
server::{Server, ServerState},
|
||||
stack::{Stack, StackServiceNames, StackState},
|
||||
stats::SystemInformation,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
update::Update,
|
||||
user::{User, admin_service_user},
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
use komodo_client::{
|
||||
busy::Busy,
|
||||
entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant,
|
||||
action::{Action, ActionState},
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::{Deployment, DeploymentState},
|
||||
docker::container::{
|
||||
ContainerListItem, ContainerStateStatusEnum,
|
||||
},
|
||||
permission::{PermissionLevel, PermissionLevelAndSpecifics},
|
||||
procedure::{Procedure, ProcedureState},
|
||||
repo::Repo,
|
||||
server::{Server, ServerState},
|
||||
stack::{Stack, StackServiceNames, StackState},
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
update::Update,
|
||||
user::{User, admin_service_user},
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::stats;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
@@ -49,8 +46,6 @@ use crate::{
|
||||
},
|
||||
};
|
||||
|
||||
use super::periphery_client;
|
||||
|
||||
// user: Id or username
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn get_user(user: &str) -> anyhow::Result<User> {
|
||||
@@ -79,10 +74,11 @@ pub async fn get_server_state(server: &Server) -> ServerState {
|
||||
if !server.config.enabled {
|
||||
return ServerState::Disabled;
|
||||
}
|
||||
// Unwrap ok: Server disabled check above
|
||||
match super::periphery_client(server)
|
||||
.unwrap()
|
||||
.request(periphery_client::api::GetHealth {})
|
||||
let Ok(periphery) = super::periphery_client(server).await else {
|
||||
return ServerState::NotOk;
|
||||
};
|
||||
match periphery
|
||||
.request(periphery_client::api::GetVersion {})
|
||||
.await
|
||||
{
|
||||
Ok(_) => ServerState::Ok,
|
||||
@@ -408,39 +404,6 @@ pub async fn get_variables_and_secrets()
|
||||
Ok(VariablesAndSecrets { variables, secrets })
|
||||
}
|
||||
|
||||
// This protects the peripheries from spam requests
|
||||
const SYSTEM_INFO_EXPIRY: u128 = ONE_MIN_MS;
|
||||
type SystemInfoCache =
|
||||
Mutex<HashMap<String, Arc<(SystemInformation, u128)>>>;
|
||||
fn system_info_cache() -> &'static SystemInfoCache {
|
||||
static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
|
||||
OnceLock::new();
|
||||
SYSTEM_INFO_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
pub async fn get_system_info(
|
||||
server: &Server,
|
||||
) -> anyhow::Result<SystemInformation> {
|
||||
let mut lock = system_info_cache().lock().await;
|
||||
let res = match lock.get(&server.id) {
|
||||
Some(cached) if cached.1 > unix_timestamp_ms() => {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(server)?
|
||||
.request(stats::GetSystemInformation {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
server.id.clone(),
|
||||
(stats.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
|
||||
.into(),
|
||||
);
|
||||
stats
|
||||
}
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Get last time procedure / action was run using Update query.
|
||||
/// Ignored whether run was successful.
|
||||
pub async fn get_last_run_at<R: KomodoResource>(
|
||||
@@ -467,7 +430,7 @@ pub async fn get_action_state(id: &String) -> ActionState {
|
||||
.action
|
||||
.get(id)
|
||||
.await
|
||||
.map(|s| s.get().map(|s| s.running))
|
||||
.map(|s| s.get().map(|s| s.busy()))
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
@@ -483,7 +446,7 @@ pub async fn get_procedure_state(id: &String) -> ProcedureState {
|
||||
.procedure
|
||||
.get(id)
|
||||
.await
|
||||
.map(|s| s.get().map(|s| s.running))
|
||||
.map(|s| s.get().map(|s| s.busy()))
|
||||
.transpose()
|
||||
.ok()
|
||||
.flatten()
|
||||
|
||||
@@ -492,6 +492,13 @@ pub async fn init_execution_update(
|
||||
return Ok(Default::default());
|
||||
}
|
||||
|
||||
ExecuteRequest::RunStackService(data) => (
|
||||
Operation::RunStackService,
|
||||
ResourceTarget::Stack(
|
||||
resource::get::<Stack>(&data.stack).await?.id,
|
||||
),
|
||||
),
|
||||
|
||||
// Alerter
|
||||
ExecuteRequest::TestAlerter(data) => (
|
||||
Operation::TestAlerter,
|
||||
@@ -499,6 +506,9 @@ pub async fn init_execution_update(
|
||||
resource::get::<Alerter>(&data.alerter).await?.id,
|
||||
),
|
||||
),
|
||||
ExecuteRequest::SendAlert(_) => {
|
||||
(Operation::SendAlert, ResourceTarget::system())
|
||||
}
|
||||
|
||||
// Maintenance
|
||||
ExecuteRequest::ClearRepoCache(_data) => {
|
||||
@@ -510,6 +520,9 @@ pub async fn init_execution_update(
|
||||
ExecuteRequest::GlobalAutoUpdate(_data) => {
|
||||
(Operation::GlobalAutoUpdate, ResourceTarget::system())
|
||||
}
|
||||
ExecuteRequest::RotateAllServerKeys(_data) => {
|
||||
(Operation::RotateAllServerKeys, ResourceTarget::system())
|
||||
}
|
||||
};
|
||||
|
||||
let mut update = make_update(target, operation, user);
|
||||
|
||||
@@ -2,10 +2,11 @@ use std::sync::Arc;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use axum::{Router, http::HeaderMap};
|
||||
use cache::CloneCache;
|
||||
use komodo_client::entities::resource::Resource;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{helpers::cache::Cache, resource::KomodoResource};
|
||||
use crate::resource::KomodoResource;
|
||||
|
||||
mod integrations;
|
||||
mod resources;
|
||||
@@ -19,7 +20,7 @@ pub fn router() -> Router {
|
||||
.nest("/gitlab", router::router::<gitlab::Gitlab>())
|
||||
}
|
||||
|
||||
type ListenerLockCache = Cache<String, Arc<Mutex<()>>>;
|
||||
type ListenerLockCache = CloneCache<String, Arc<Mutex<()>>>;
|
||||
|
||||
/// Implemented for all resources which can recieve webhook.
|
||||
trait CustomSecret: KomodoResource {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![recursion_limit = "256"]
|
||||
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
@@ -11,17 +13,19 @@ use tower_http::{
|
||||
services::{ServeDir, ServeFile},
|
||||
};
|
||||
|
||||
use crate::config::core_config;
|
||||
use crate::config::{core_config, core_public_key};
|
||||
|
||||
mod alert;
|
||||
mod api;
|
||||
mod auth;
|
||||
mod cloud;
|
||||
mod config;
|
||||
mod connection;
|
||||
mod helpers;
|
||||
mod listener;
|
||||
mod monitor;
|
||||
mod network;
|
||||
mod periphery;
|
||||
mod permission;
|
||||
mod resource;
|
||||
mod schedule;
|
||||
@@ -36,21 +40,26 @@ async fn app() -> anyhow::Result<()> {
|
||||
dotenvy::dotenv().ok();
|
||||
let config = core_config();
|
||||
logger::init(&config.logging)?;
|
||||
if let Err(e) =
|
||||
rustls::crypto::aws_lc_rs::default_provider().install_default()
|
||||
{
|
||||
error!("Failed to install default crypto provider | {e:?}");
|
||||
std::process::exit(1);
|
||||
};
|
||||
|
||||
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
if core_config().pretty_startup_config {
|
||||
info!("{:#?}", config.sanitized());
|
||||
} else {
|
||||
info!("{:?}", config.sanitized());
|
||||
match (
|
||||
config.pretty_startup_config,
|
||||
config.unsafe_unsanitized_startup_config,
|
||||
) {
|
||||
(true, true) => info!("{:#?}", config),
|
||||
(true, false) => info!("{:#?}", config.sanitized()),
|
||||
(false, true) => info!("{:?}", config),
|
||||
(false, false) => info!("{:?}", config.sanitized()),
|
||||
}
|
||||
|
||||
// Init + log public key. Will crash if invalid private key here.
|
||||
info!("Public Key: {}", core_public_key());
|
||||
|
||||
rustls::crypto::aws_lc_rs::default_provider()
|
||||
.install_default()
|
||||
.expect("Failed to install default crypto provider");
|
||||
|
||||
// Init jwt client to crash on failure
|
||||
state::jwt_client();
|
||||
tokio::join!(
|
||||
@@ -118,9 +127,6 @@ async fn app() -> anyhow::Result<()> {
|
||||
|
||||
if config.ssl_enabled {
|
||||
info!("🔒 Core SSL Enabled");
|
||||
rustls::crypto::ring::default_provider()
|
||||
.install_default()
|
||||
.expect("failed to install default rustls CryptoProvider");
|
||||
info!("Komodo Core starting on https://{socket_addr}");
|
||||
let ssl_config = RustlsConfig::from_pem_file(
|
||||
&config.ssl_cert_file,
|
||||
|
||||
@@ -20,7 +20,7 @@ pub async fn alert_deployments(
|
||||
) {
|
||||
let mut alerts = Vec::<Alert>::new();
|
||||
let action_states = action_states();
|
||||
for status in deployment_status_cache().get_list().await {
|
||||
for status in deployment_status_cache().get_values().await {
|
||||
// Don't alert if prev None
|
||||
let Some(prev) = status.prev else {
|
||||
continue;
|
||||
|
||||
@@ -78,7 +78,7 @@ pub async fn alert_servers(
|
||||
ts: i64,
|
||||
mut servers: HashMap<String, Server>,
|
||||
) {
|
||||
let server_statuses = server_status_cache().get_list().await;
|
||||
let server_statuses = server_status_cache().get_values().await;
|
||||
|
||||
let (open_alerts, open_disk_alerts) = match get_open_alerts().await
|
||||
{
|
||||
@@ -178,6 +178,84 @@ pub async fn alert_servers(
|
||||
),
|
||||
}
|
||||
|
||||
// ===================
|
||||
// SERVER VERSION MISMATCH
|
||||
// ===================
|
||||
let core_version = env!("CARGO_PKG_VERSION");
|
||||
let mismatched_server_version =
|
||||
if server_status.state != ServerState::Ok {
|
||||
None
|
||||
} else if let Some(version) =
|
||||
server_status.periphery_info.as_ref().map(|i| &i.version)
|
||||
&& version != core_version
|
||||
{
|
||||
Some(version)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let version_alert = server_alerts.as_ref().and_then(|alerts| {
|
||||
alerts.get(&AlertDataVariant::ServerVersionMismatch)
|
||||
});
|
||||
|
||||
match (mismatched_server_version, version_alert) {
|
||||
(Some(version), None) => {
|
||||
// Only open version mismatch alert if not in maintenance and buffer is ready
|
||||
if !in_maintenance
|
||||
&& buffer.ready_to_open(
|
||||
server_status.id.clone(),
|
||||
AlertDataVariant::ServerVersionMismatch,
|
||||
)
|
||||
{
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts,
|
||||
resolved: false,
|
||||
resolved_ts: None,
|
||||
level: SeverityLevel::Warning,
|
||||
target: ResourceTarget::Server(server_status.id.clone()),
|
||||
data: AlertData::ServerVersionMismatch {
|
||||
id: server_status.id.clone(),
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
server_version: version.clone(),
|
||||
core_version: core_version.to_string(),
|
||||
},
|
||||
};
|
||||
// Use send_unreachable_alerts as a proxy for general server alerts
|
||||
alerts_to_open
|
||||
.push((alert, server.config.send_version_mismatch_alerts))
|
||||
}
|
||||
}
|
||||
(Some(version), Some(alert)) => {
|
||||
// Update existing alert with current version info
|
||||
let mut alert = alert.clone();
|
||||
alert.data = AlertData::ServerVersionMismatch {
|
||||
id: server_status.id.clone(),
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
server_version: version.clone(),
|
||||
core_version: core_version.to_string(),
|
||||
};
|
||||
// Don't send notification for updates
|
||||
alerts_to_update.push((alert, false));
|
||||
}
|
||||
(None, Some(alert)) => {
|
||||
// Version is now correct, close the alert
|
||||
alert_ids_to_close.push((
|
||||
alert.clone(),
|
||||
server.config.send_version_mismatch_alerts,
|
||||
));
|
||||
}
|
||||
(None, None) => {
|
||||
// Reset buffer state when no mismatch and no alert
|
||||
buffer.reset(
|
||||
server_status.id.clone(),
|
||||
AlertDataVariant::ServerVersionMismatch,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
let Some(health) = &server_status.health else {
|
||||
continue;
|
||||
};
|
||||
@@ -211,7 +289,7 @@ pub async fn alert_servers(
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
percentage: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.cpu_perc as f64)
|
||||
.unwrap_or(0.0),
|
||||
@@ -233,7 +311,7 @@ pub async fn alert_servers(
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
percentage: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.cpu_perc as f64)
|
||||
.unwrap_or(0.0),
|
||||
@@ -249,7 +327,7 @@ pub async fn alert_servers(
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
percentage: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.cpu_perc as f64)
|
||||
.unwrap_or(0.0),
|
||||
@@ -290,12 +368,12 @@ pub async fn alert_servers(
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
total_gb: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.mem_total_gb)
|
||||
.unwrap_or(0.0),
|
||||
used_gb: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.mem_used_gb)
|
||||
.unwrap_or(0.0),
|
||||
@@ -317,12 +395,12 @@ pub async fn alert_servers(
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
total_gb: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.mem_total_gb)
|
||||
.unwrap_or(0.0),
|
||||
used_gb: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.mem_used_gb)
|
||||
.unwrap_or(0.0),
|
||||
@@ -338,12 +416,12 @@ pub async fn alert_servers(
|
||||
name: server.name.clone(),
|
||||
region: optional_string(&server.config.region),
|
||||
total_gb: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.mem_total_gb)
|
||||
.unwrap_or(0.0),
|
||||
used_gb: server_status
|
||||
.stats
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.map(|s| s.mem_used_gb)
|
||||
.unwrap_or(0.0),
|
||||
@@ -381,7 +459,7 @@ pub async fn alert_servers(
|
||||
)
|
||||
{
|
||||
let disk =
|
||||
server_status.stats.as_ref().and_then(|stats| {
|
||||
server_status.system_stats.as_ref().and_then(|stats| {
|
||||
stats.disks.iter().find(|disk| disk.mount == *path)
|
||||
});
|
||||
let alert = Alert {
|
||||
@@ -416,7 +494,7 @@ pub async fn alert_servers(
|
||||
// modify alert level only if it has increased and not in maintenance
|
||||
if !in_maintenance && health.level < alert.level {
|
||||
let disk =
|
||||
server_status.stats.as_ref().and_then(|stats| {
|
||||
server_status.system_stats.as_ref().and_then(|stats| {
|
||||
stats.disks.iter().find(|disk| disk.mount == *path)
|
||||
});
|
||||
alert.level = health.level;
|
||||
@@ -434,9 +512,10 @@ pub async fn alert_servers(
|
||||
}
|
||||
(SeverityLevel::Ok, Some(alert), true) => {
|
||||
let mut alert = alert.clone();
|
||||
let disk = server_status.stats.as_ref().and_then(|stats| {
|
||||
stats.disks.iter().find(|disk| disk.mount == *path)
|
||||
});
|
||||
let disk =
|
||||
server_status.system_stats.as_ref().and_then(|stats| {
|
||||
stats.disks.iter().find(|disk| disk.mount == *path)
|
||||
});
|
||||
alert.level = health.level;
|
||||
alert.data = AlertData::ServerDisk {
|
||||
id: server_status.id.clone(),
|
||||
|
||||
@@ -19,7 +19,7 @@ pub async fn alert_stacks(
|
||||
) {
|
||||
let action_states = action_states();
|
||||
let mut alerts = Vec::<Alert>::new();
|
||||
for status in stack_status_cache().get_list().await {
|
||||
for status in stack_status_cache().get_values().await {
|
||||
// Don't alert if prev None
|
||||
let Some(prev) = status.prev else {
|
||||
continue;
|
||||
|
||||
@@ -7,11 +7,11 @@ use komodo_client::entities::{
|
||||
},
|
||||
repo::Repo,
|
||||
server::{
|
||||
Server, ServerConfig, ServerHealth, ServerHealthState,
|
||||
ServerState,
|
||||
PeripheryInformation, Server, ServerConfig, ServerHealth,
|
||||
ServerHealthState, ServerState,
|
||||
},
|
||||
stack::{ComposeProject, Stack, StackState},
|
||||
stats::{SingleDiskUsage, SystemStats},
|
||||
stats::{SingleDiskUsage, SystemInformation, SystemStats},
|
||||
};
|
||||
use serror::Serror;
|
||||
|
||||
@@ -103,20 +103,23 @@ type DockerLists = (
|
||||
pub async fn insert_server_status(
|
||||
server: &Server,
|
||||
state: ServerState,
|
||||
version: String,
|
||||
stats: Option<SystemStats>,
|
||||
periphery_info: Option<PeripheryInformation>,
|
||||
system_info: Option<SystemInformation>,
|
||||
system_stats: Option<SystemStats>,
|
||||
(containers, networks, images, volumes, projects): DockerLists,
|
||||
err: impl Into<Option<Serror>>,
|
||||
) {
|
||||
let health = stats.as_ref().map(|s| get_server_health(server, s));
|
||||
let health =
|
||||
system_stats.as_ref().map(|s| get_server_health(server, s));
|
||||
server_status_cache()
|
||||
.insert(
|
||||
server.id.clone(),
|
||||
CachedServerStatus {
|
||||
id: server.id.clone(),
|
||||
state,
|
||||
version,
|
||||
stats,
|
||||
periphery_info,
|
||||
system_info,
|
||||
system_stats,
|
||||
health,
|
||||
containers,
|
||||
networks,
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
use komodo_client::entities::{
|
||||
docker::{
|
||||
container::ContainerListItem, image::ImageListItem,
|
||||
network::NetworkListItem, volume::VolumeListItem,
|
||||
},
|
||||
stack::ComposeProject,
|
||||
};
|
||||
use periphery_client::{
|
||||
PeripheryClient,
|
||||
api::{GetDockerLists, GetDockerListsResponse},
|
||||
};
|
||||
|
||||
pub async fn get_docker_lists(
|
||||
periphery: &PeripheryClient,
|
||||
) -> anyhow::Result<(
|
||||
Vec<ContainerListItem>,
|
||||
Vec<NetworkListItem>,
|
||||
Vec<ImageListItem>,
|
||||
Vec<VolumeListItem>,
|
||||
Vec<ComposeProject>,
|
||||
)> {
|
||||
let GetDockerListsResponse {
|
||||
containers,
|
||||
networks,
|
||||
images,
|
||||
volumes,
|
||||
projects,
|
||||
} = periphery.request(GetDockerLists {}).await?;
|
||||
// TODO: handle the errors
|
||||
let (
|
||||
mut containers,
|
||||
mut networks,
|
||||
mut images,
|
||||
mut volumes,
|
||||
mut projects,
|
||||
) = (
|
||||
containers.unwrap_or_default(),
|
||||
networks.unwrap_or_default(),
|
||||
images.unwrap_or_default(),
|
||||
volumes.unwrap_or_default(),
|
||||
projects.unwrap_or_default(),
|
||||
);
|
||||
|
||||
containers.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
networks.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
images.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
volumes.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
projects.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
Ok((containers, networks, images, volumes, projects))
|
||||
}
|
||||
@@ -1,26 +1,37 @@
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
use async_timing_util::wait_until_timelength;
|
||||
use cache::CloneCache;
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use futures::future::join_all;
|
||||
use helpers::insert_stacks_status_unknown;
|
||||
use komodo_client::entities::{
|
||||
deployment::DeploymentState,
|
||||
build::Build,
|
||||
deployment::{Deployment, DeploymentState},
|
||||
docker::{
|
||||
container::ContainerListItem, image::ImageListItem,
|
||||
network::NetworkListItem, volume::VolumeListItem,
|
||||
},
|
||||
komodo_timestamp, optional_string,
|
||||
server::{Server, ServerHealth, ServerState},
|
||||
stack::{ComposeProject, StackService, StackState},
|
||||
stats::SystemStats,
|
||||
repo::Repo,
|
||||
server::{PeripheryInformation, Server, ServerHealth, ServerState},
|
||||
stack::{ComposeProject, Stack, StackService, StackState},
|
||||
stats::{SystemInformation, SystemStats},
|
||||
};
|
||||
use periphery_client::api::{
|
||||
self, PollStatusResponse, git::GetLatestCommit,
|
||||
};
|
||||
use periphery_client::api::{self, git::GetLatestCommit};
|
||||
use serror::Serror;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::periphery_client,
|
||||
monitor::{alert::check_alerts, record::record_server_stats},
|
||||
state::{db_client, deployment_status_cache, repo_status_cache},
|
||||
state::{
|
||||
db_client, deployment_status_cache, periphery_connections,
|
||||
repo_status_cache,
|
||||
},
|
||||
};
|
||||
|
||||
use self::helpers::{
|
||||
@@ -30,7 +41,6 @@ use self::helpers::{
|
||||
|
||||
mod alert;
|
||||
mod helpers;
|
||||
mod lists;
|
||||
mod record;
|
||||
mod resources;
|
||||
|
||||
@@ -44,9 +54,10 @@ pub struct History<Curr: Default, Prev> {
|
||||
pub struct CachedServerStatus {
|
||||
pub id: String,
|
||||
pub state: ServerState,
|
||||
pub version: String,
|
||||
pub stats: Option<SystemStats>,
|
||||
pub health: Option<ServerHealth>,
|
||||
pub periphery_info: Option<PeripheryInformation>,
|
||||
pub system_info: Option<SystemInformation>,
|
||||
pub system_stats: Option<SystemStats>,
|
||||
pub containers: Option<Vec<ContainerListItem>>,
|
||||
pub networks: Option<Vec<NetworkListItem>>,
|
||||
pub images: Option<Vec<ImageListItem>>,
|
||||
@@ -110,72 +121,75 @@ async fn refresh_server_cache(ts: i64) {
|
||||
}
|
||||
};
|
||||
let futures = servers.into_iter().map(|server| async move {
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, false).await;
|
||||
});
|
||||
join_all(futures).await;
|
||||
tokio::join!(check_alerts(ts), record_server_stats(ts));
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn update_cache_for_server(server: &Server) {
|
||||
let (deployments, builds, repos, stacks) = tokio::join!(
|
||||
find_collect(
|
||||
&db_client().deployments,
|
||||
doc! { "config.server_id": &server.id },
|
||||
None,
|
||||
),
|
||||
find_collect(&db_client().builds, doc! {}, None,),
|
||||
find_collect(
|
||||
&db_client().repos,
|
||||
doc! { "config.server_id": &server.id },
|
||||
None,
|
||||
),
|
||||
find_collect(
|
||||
&db_client().stacks,
|
||||
doc! { "config.server_id": &server.id },
|
||||
None,
|
||||
)
|
||||
);
|
||||
/// Makes sure cache for server doesn't update too frequently / simultaneously.
|
||||
/// If forced, will still block against simultaneous update.
|
||||
fn update_cache_for_server_controller()
|
||||
-> &'static CloneCache<String, Arc<Mutex<i64>>> {
|
||||
static CACHE: OnceLock<CloneCache<String, Arc<Mutex<i64>>>> =
|
||||
OnceLock::new();
|
||||
CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
let deployments = deployments.inspect_err(|e| error!("failed to get deployments list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
|
||||
let builds = builds.inspect_err(|e| error!("failed to get builds list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
|
||||
let repos = repos.inspect_err(|e| error!("failed to get repos list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
|
||||
let stacks = stacks.inspect_err(|e| error!("failed to get stacks list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
|
||||
/// The background loop will call this with force: false,
|
||||
/// which exits early if the lock is busy or it was completed too recently.
|
||||
/// If force is true, it will wait on simultaneous calls, and will
|
||||
/// ignore the restriction on being completed too recently.
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn update_cache_for_server(server: &Server, force: bool) {
|
||||
// Concurrency controller to ensure it isn't done too often
|
||||
// when it happens in other contexts.
|
||||
let controller = update_cache_for_server_controller()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
let mut lock = match controller.try_lock() {
|
||||
Ok(lock) => lock,
|
||||
Err(_) if force => controller.lock().await,
|
||||
Err(_) => return,
|
||||
};
|
||||
|
||||
let now = komodo_timestamp();
|
||||
|
||||
// early return if called again sooner than 1s.
|
||||
if !force && *lock > now - 1_000 {
|
||||
return;
|
||||
}
|
||||
|
||||
*lock = now;
|
||||
|
||||
let resources = UpdateCacheResources::load(server).await;
|
||||
|
||||
// Handle server disabled
|
||||
if !server.config.enabled {
|
||||
insert_deployments_status_unknown(deployments).await;
|
||||
insert_stacks_status_unknown(stacks).await;
|
||||
insert_repos_status_unknown(repos).await;
|
||||
resources.insert_status_unknown().await;
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::Disabled,
|
||||
String::from("unknown"),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
(None, None, None, None, None),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
periphery_connections().remove(&server.id).await;
|
||||
return;
|
||||
}
|
||||
|
||||
let Ok(periphery) = periphery_client(server) else {
|
||||
error!(
|
||||
"somehow periphery not ok to create. should not be reached."
|
||||
);
|
||||
return;
|
||||
};
|
||||
|
||||
let version = match periphery.request(api::GetVersion {}).await {
|
||||
Ok(version) => version.version,
|
||||
let periphery = match periphery_client(server).await {
|
||||
Ok(periphery) => periphery,
|
||||
Err(e) => {
|
||||
insert_deployments_status_unknown(deployments).await;
|
||||
insert_stacks_status_unknown(stacks).await;
|
||||
insert_repos_status_unknown(repos).await;
|
||||
resources.insert_status_unknown().await;
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::NotOk,
|
||||
String::from("Unknown"),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
(None, None, None, None, None),
|
||||
Serror::from(&e),
|
||||
@@ -185,82 +199,75 @@ pub async fn update_cache_for_server(server: &Server) {
|
||||
}
|
||||
};
|
||||
|
||||
let stats = if server.config.stats_monitoring {
|
||||
match periphery.request(api::stats::GetSystemStats {}).await {
|
||||
Ok(stats) => Some(filter_volumes(server, stats)),
|
||||
Err(e) => {
|
||||
insert_deployments_status_unknown(deployments).await;
|
||||
insert_stacks_status_unknown(stacks).await;
|
||||
insert_repos_status_unknown(repos).await;
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::NotOk,
|
||||
String::from("unknown"),
|
||||
None,
|
||||
(None, None, None, None, None),
|
||||
Serror::from(&e),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
let PollStatusResponse {
|
||||
periphery_info,
|
||||
system_info,
|
||||
system_stats,
|
||||
mut containers,
|
||||
networks,
|
||||
images,
|
||||
volumes,
|
||||
projects,
|
||||
} = match periphery
|
||||
.request(api::PollStatus {
|
||||
include_stats: server.config.stats_monitoring,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(info) => info,
|
||||
Err(e) => {
|
||||
resources.insert_status_unknown().await;
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::NotOk,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
(None, None, None, None, None),
|
||||
Serror::from(&e),
|
||||
)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
match lists::get_docker_lists(&periphery).await {
|
||||
Ok((mut containers, networks, images, volumes, projects)) => {
|
||||
containers.iter_mut().for_each(|container| {
|
||||
container.server_id = Some(server.id.clone())
|
||||
});
|
||||
tokio::join!(
|
||||
resources::update_deployment_cache(
|
||||
server.name.clone(),
|
||||
deployments,
|
||||
&containers,
|
||||
&images,
|
||||
&builds,
|
||||
),
|
||||
resources::update_stack_cache(
|
||||
server.name.clone(),
|
||||
stacks,
|
||||
&containers,
|
||||
&images
|
||||
),
|
||||
);
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::Ok,
|
||||
version,
|
||||
stats,
|
||||
(
|
||||
Some(containers.clone()),
|
||||
Some(networks),
|
||||
Some(images),
|
||||
Some(volumes),
|
||||
Some(projects),
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Err(e) => {
|
||||
insert_deployments_status_unknown(deployments).await;
|
||||
insert_stacks_status_unknown(stacks).await;
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::Ok,
|
||||
version,
|
||||
stats,
|
||||
(None, None, None, None, None),
|
||||
Some(e.into()),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
containers.iter_mut().for_each(|container| {
|
||||
container.server_id = Some(server.id.clone())
|
||||
});
|
||||
tokio::join!(
|
||||
resources::update_deployment_cache(
|
||||
server.name.clone(),
|
||||
resources.deployments,
|
||||
&containers,
|
||||
&images,
|
||||
&resources.builds,
|
||||
),
|
||||
resources::update_stack_cache(
|
||||
server.name.clone(),
|
||||
resources.stacks,
|
||||
&containers,
|
||||
&images
|
||||
),
|
||||
);
|
||||
insert_server_status(
|
||||
server,
|
||||
ServerState::Ok,
|
||||
Some(periphery_info),
|
||||
Some(system_info),
|
||||
system_stats.map(|stats| filter_volumes(server, stats)),
|
||||
(
|
||||
Some(containers.clone()),
|
||||
Some(networks),
|
||||
Some(images),
|
||||
Some(volumes),
|
||||
Some(projects),
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
let status_cache = repo_status_cache();
|
||||
for repo in repos {
|
||||
for repo in resources.repos {
|
||||
let (latest_hash, latest_message) = periphery
|
||||
.request(GetLatestCommit {
|
||||
name: repo.name.clone(),
|
||||
@@ -284,6 +291,54 @@ pub async fn update_cache_for_server(server: &Server) {
|
||||
}
|
||||
}
|
||||
|
||||
struct UpdateCacheResources {
|
||||
stacks: Vec<Stack>,
|
||||
deployments: Vec<Deployment>,
|
||||
builds: Vec<Build>,
|
||||
repos: Vec<Repo>,
|
||||
}
|
||||
|
||||
impl UpdateCacheResources {
|
||||
pub async fn load(server: &Server) -> Self {
|
||||
let (stacks, deployments, builds, repos) = tokio::join!(
|
||||
find_collect(
|
||||
&db_client().stacks,
|
||||
doc! { "config.server_id": &server.id },
|
||||
None,
|
||||
),
|
||||
find_collect(
|
||||
&db_client().deployments,
|
||||
doc! { "config.server_id": &server.id },
|
||||
None,
|
||||
),
|
||||
find_collect(&db_client().builds, doc! {}, None,),
|
||||
find_collect(
|
||||
&db_client().repos,
|
||||
doc! { "config.server_id": &server.id },
|
||||
None,
|
||||
),
|
||||
);
|
||||
|
||||
let stacks = stacks.inspect_err(|e| error!("failed to get stacks list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
|
||||
let deployments = deployments.inspect_err(|e| error!("failed to get deployments list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
|
||||
let builds = builds.inspect_err(|e| error!("failed to get builds list from db (update status cache) | server : {} | {e:#}", server.name)).unwrap_or_default();
|
||||
let repos = repos.inspect_err(|e| error!("failed to get repos list from db (update status cache) | server: {} | {e:#}", server.name)).unwrap_or_default();
|
||||
|
||||
Self {
|
||||
stacks,
|
||||
deployments,
|
||||
builds,
|
||||
repos,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn insert_status_unknown(self) {
|
||||
insert_stacks_status_unknown(self.stacks).await;
|
||||
insert_deployments_status_unknown(self.deployments).await;
|
||||
insert_repos_status_unknown(self.repos).await;
|
||||
}
|
||||
}
|
||||
|
||||
fn filter_volumes(
|
||||
server: &Server,
|
||||
mut stats: SystemStats,
|
||||
|
||||
@@ -6,11 +6,11 @@ use crate::state::{db_client, server_status_cache};
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn record_server_stats(ts: i64) {
|
||||
let status = server_status_cache().get_list().await;
|
||||
let status = server_status_cache().get_values().await;
|
||||
let records = status
|
||||
.into_iter()
|
||||
.filter_map(|status| {
|
||||
let stats = status.stats.as_ref()?;
|
||||
let stats = status.system_stats.as_ref()?;
|
||||
|
||||
let TotalDiskUsage {
|
||||
used_gb: disk_used_gb,
|
||||
@@ -21,6 +21,7 @@ pub async fn record_server_stats(ts: i64) {
|
||||
ts,
|
||||
sid: status.id.clone(),
|
||||
cpu_perc: stats.cpu_perc,
|
||||
load_average: stats.load_average.clone(),
|
||||
mem_total_gb: stats.mem_total_gb,
|
||||
mem_used_gb: stats.mem_used_gb,
|
||||
disk_total_gb,
|
||||
|
||||
@@ -81,7 +81,7 @@ pub async fn update_deployment_cache(
|
||||
// If image already has tag, leave it,
|
||||
// otherwise default the tag to latest
|
||||
if image.contains(':') {
|
||||
image
|
||||
image.to_string()
|
||||
} else {
|
||||
format!("{image}:latest")
|
||||
}
|
||||
@@ -92,6 +92,9 @@ pub async fn update_deployment_cache(
|
||||
..
|
||||
}) = &container
|
||||
{
|
||||
// Docker will automatically strip `docker.io` from incoming image names re #468.
|
||||
// Need to strip it in order to match by image name and find available updates.
|
||||
let image = image.strip_prefix("docker.io/").unwrap_or(&image);
|
||||
images
|
||||
.iter()
|
||||
.find(|i| i.name == image)
|
||||
@@ -250,20 +253,21 @@ pub async fn update_stack_cache(
|
||||
}
|
||||
}.is_match(&container.name)
|
||||
}).cloned();
|
||||
// If image already has tag, leave it,
|
||||
// otherwise default the tag to latest
|
||||
let image = image.clone();
|
||||
let image = if image.contains(':') {
|
||||
image
|
||||
image.to_string()
|
||||
} else {
|
||||
image + ":latest"
|
||||
format!("{image}:latest")
|
||||
};
|
||||
let update_available = if let Some(ContainerListItem { image_id: Some(curr_image_id), .. }) = &container {
|
||||
// Docker will automatically strip `docker.io` from incoming image names re #468.
|
||||
// Need to strip it in order to match by image tag and find available update.
|
||||
let image =
|
||||
image.strip_prefix("docker.io/").unwrap_or(&image);
|
||||
images
|
||||
.iter()
|
||||
.find(|i| i.name == image)
|
||||
.map(|i| &i.id != curr_image_id)
|
||||
.unwrap_or_default()
|
||||
.iter()
|
||||
.find(|i| i.name == image)
|
||||
.map(|i| &i.id != curr_image_id)
|
||||
.unwrap_or_default()
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
@@ -36,10 +36,10 @@ fn is_container_environment() -> bool {
|
||||
}
|
||||
|
||||
// Check cgroup for container runtime indicators
|
||||
if let Ok(content) = std::fs::read_to_string(CGROUP_FILE) {
|
||||
if content.contains("docker") || content.contains("containerd") {
|
||||
return true;
|
||||
}
|
||||
if let Ok(content) = std::fs::read_to_string(CGROUP_FILE)
|
||||
&& (content.contains("docker") || content.contains("containerd"))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
@@ -142,7 +142,7 @@ async fn find_gateway(
|
||||
}
|
||||
|
||||
let ip_cidr = ip_cidr.ok_or_else(|| anyhow!(
|
||||
"Could not find IP address for interface '{}'. Ensure interface has a valid IPv4 address",
|
||||
"Could not find IP address for interface '{}'. Ensure interface has a valid IPv4 address",
|
||||
interface_name
|
||||
))?;
|
||||
|
||||
@@ -167,14 +167,13 @@ async fn find_gateway(
|
||||
if line.contains("via") {
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if let Some(via_idx) = parts.iter().position(|&x| x == "via")
|
||||
&& let Some(&gateway) = parts.get(via_idx + 1)
|
||||
{
|
||||
if let Some(&gateway) = parts.get(via_idx + 1) {
|
||||
trace!(
|
||||
"Found gateway {} for {} from routing table",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
trace!(
|
||||
"Found gateway {} for {} from routing table",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -206,14 +205,14 @@ async fn find_gateway(
|
||||
.output()
|
||||
.await;
|
||||
|
||||
if let Ok(output) = route_test {
|
||||
if output.status.success() {
|
||||
trace!(
|
||||
"Gateway {} is reachable via {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
if let Ok(output) = route_test
|
||||
&& output.status.success()
|
||||
{
|
||||
trace!(
|
||||
"Gateway {} is reachable via {}",
|
||||
gateway, interface_name
|
||||
);
|
||||
return Ok(gateway.to_string());
|
||||
}
|
||||
|
||||
// Fallback: assume .1 is gateway (Docker standard)
|
||||
@@ -266,10 +265,10 @@ async fn set_default_gateway(
|
||||
.output()
|
||||
.await;
|
||||
|
||||
if let Ok(output) = remove_default {
|
||||
if output.status.success() {
|
||||
trace!("Removed existing default routes");
|
||||
}
|
||||
if let Ok(output) = remove_default
|
||||
&& output.status.success()
|
||||
{
|
||||
trace!("Removed existing default routes");
|
||||
}
|
||||
|
||||
// Add new default route
|
||||
|
||||
205
bin/core/src/periphery/mod.rs
Normal file
205
bin/core/src/periphery/mod.rs
Normal file
@@ -0,0 +1,205 @@
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use bytes::Bytes;
|
||||
use cache::CloneCache;
|
||||
use periphery_client::api;
|
||||
use resolver_api::HasResponse;
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use serde_json::json;
|
||||
use serror::deserialize_error_bytes;
|
||||
use tokio::sync::mpsc::{self, Sender};
|
||||
use tracing::warn;
|
||||
use transport::{
|
||||
MessageState,
|
||||
bytes::{from_transport_bytes, to_transport_bytes},
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
connection::{PeripheryConnection, PeripheryConnectionArgs},
|
||||
state::periphery_connections,
|
||||
};
|
||||
|
||||
pub mod terminal;
|
||||
|
||||
pub type ConnectionChannels = CloneCache<Uuid, Sender<Bytes>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PeripheryClient {
|
||||
pub id: String,
|
||||
channels: Arc<ConnectionChannels>,
|
||||
}
|
||||
|
||||
impl PeripheryClient {
|
||||
pub async fn new(
|
||||
args: PeripheryConnectionArgs<'_>,
|
||||
insecure_tls: bool,
|
||||
// deprecated.
|
||||
passkey: &str,
|
||||
) -> anyhow::Result<PeripheryClient> {
|
||||
let connections = periphery_connections();
|
||||
|
||||
let id = args.id.to_string();
|
||||
|
||||
// Spawn client side connection if one doesn't exist.
|
||||
let Some(connection) = connections.get(&id).await else {
|
||||
if args.address.is_none() {
|
||||
return Err(anyhow!("Server {id} is not connected"));
|
||||
}
|
||||
let channels = args
|
||||
.spawn_client_connection(
|
||||
id.clone(),
|
||||
insecure_tls,
|
||||
passkey.to_string(),
|
||||
)
|
||||
.await?;
|
||||
return Ok(PeripheryClient { id, channels });
|
||||
};
|
||||
|
||||
// Ensure the connection args are unchanged.
|
||||
if args.matches(&connection.args) {
|
||||
return Ok(PeripheryClient {
|
||||
id,
|
||||
channels: connection.channels.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
// The args have changed.
|
||||
if args.address.is_none() {
|
||||
// Periphery -> Core connection
|
||||
// Remove this connection, wait and see if client reconnects
|
||||
connections.remove(&id).await;
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
let connection = connections
|
||||
.get(&id)
|
||||
.await
|
||||
.with_context(|| format!("Server {id} is not connected"))?;
|
||||
Ok(PeripheryClient {
|
||||
id,
|
||||
channels: connection.channels.clone(),
|
||||
})
|
||||
} else {
|
||||
// Core -> Periphery connection
|
||||
let channels = args
|
||||
.spawn_client_connection(
|
||||
id.clone(),
|
||||
insecure_tls,
|
||||
passkey.to_string(),
|
||||
)
|
||||
.await?;
|
||||
Ok(PeripheryClient { id, channels })
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn cleanup(self) -> Option<Arc<PeripheryConnection>> {
|
||||
periphery_connections().remove(&self.id).await
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
pub async fn health_check(&self) -> anyhow::Result<()> {
|
||||
self.request(api::GetHealth {}).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(
|
||||
name = "PeripheryRequest",
|
||||
skip(self),
|
||||
level = "debug"
|
||||
)]
|
||||
pub async fn request<T>(
|
||||
&self,
|
||||
request: T,
|
||||
) -> anyhow::Result<T::Response>
|
||||
where
|
||||
T: std::fmt::Debug + Serialize + HasResponse,
|
||||
T::Response: DeserializeOwned,
|
||||
{
|
||||
let connection =
|
||||
periphery_connections().get(&self.id).await.with_context(
|
||||
|| format!("No connection found for server {}", self.id),
|
||||
)?;
|
||||
|
||||
// Polls connected 3 times before bailing
|
||||
connection.bail_if_not_connected().await?;
|
||||
|
||||
let id = Uuid::new_v4();
|
||||
let (response_sender, mut response_receiever) =
|
||||
mpsc::channel(1000);
|
||||
self.channels.insert(id, response_sender).await;
|
||||
|
||||
let req_type = T::req_type();
|
||||
let data = serde_json::to_vec(&json!({
|
||||
"type": req_type,
|
||||
"params": request
|
||||
}))
|
||||
.context("Failed to serialize request to bytes")?;
|
||||
|
||||
if let Err(e) = connection
|
||||
.send(to_transport_bytes(data, id, MessageState::Request))
|
||||
.await
|
||||
.context("Failed to send request over channel")
|
||||
{
|
||||
// cleanup
|
||||
self.channels.remove(&id).await;
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
// Poll for the associated response
|
||||
loop {
|
||||
let next = tokio::select! {
|
||||
msg = response_receiever.recv() => msg,
|
||||
// Periphery will send InProgress every 5s to avoid timeout
|
||||
_ = tokio::time::sleep(Duration::from_secs(10)) => {
|
||||
return Err(anyhow!("Response timed out"));
|
||||
}
|
||||
};
|
||||
|
||||
let bytes = match next {
|
||||
Some(bytes) => bytes,
|
||||
None => {
|
||||
return Err(anyhow!(
|
||||
"Sender dropped before response was recieved"
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let (state, data) = match from_transport_bytes(bytes) {
|
||||
Ok((data, _, state)) if !data.is_empty() => (state, data),
|
||||
// Ignore no data cases
|
||||
Ok(_) => continue,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Server {} | Received invalid message | {e:#}",
|
||||
self.id
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
match state {
|
||||
// TODO: improve the allocation in .to_vec
|
||||
MessageState::Successful => {
|
||||
// cleanup
|
||||
self.channels.remove(&id).await;
|
||||
return serde_json::from_slice(&data)
|
||||
.context("Failed to parse successful response");
|
||||
}
|
||||
MessageState::Failed => {
|
||||
// cleanup
|
||||
self.channels.remove(&id).await;
|
||||
return Err(deserialize_error_bytes(&data));
|
||||
}
|
||||
MessageState::InProgress => continue,
|
||||
// Shouldn't be received by this receiver
|
||||
other => {
|
||||
// TODO: delete log
|
||||
warn!(
|
||||
"Server {} | Got other message over over response channel: {other:?}",
|
||||
self.id
|
||||
);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
210
bin/core/src/periphery/terminal.rs
Normal file
210
bin/core/src/periphery/terminal.rs
Normal file
@@ -0,0 +1,210 @@
|
||||
use std::{
|
||||
pin::Pin,
|
||||
sync::Arc,
|
||||
task::{self, Poll},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use bytes::Bytes;
|
||||
use cache::CloneCache;
|
||||
use futures::Stream;
|
||||
use periphery_client::api::terminal::{
|
||||
ConnectContainerExec, ConnectTerminal, END_OF_OUTPUT,
|
||||
ExecuteContainerExec, ExecuteTerminal,
|
||||
};
|
||||
use tokio::sync::mpsc::{Receiver, Sender, channel};
|
||||
use transport::bytes::data_from_transport_bytes;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
periphery::PeripheryClient, state::periphery_connections,
|
||||
};
|
||||
|
||||
impl PeripheryClient {
|
||||
pub async fn connect_terminal(
|
||||
&self,
|
||||
terminal: String,
|
||||
) -> anyhow::Result<(Uuid, Sender<Bytes>, Receiver<Bytes>)> {
|
||||
tracing::trace!(
|
||||
"request | type: ConnectTerminal | terminal name: {terminal}",
|
||||
);
|
||||
|
||||
let connection =
|
||||
periphery_connections().get(&self.id).await.with_context(
|
||||
|| format!("No connection found for server {}", self.id),
|
||||
)?;
|
||||
|
||||
let id = self
|
||||
.request(ConnectTerminal { terminal })
|
||||
.await
|
||||
.context("Failed to create terminal connection")?;
|
||||
|
||||
let (sender, receiever) = channel(1024);
|
||||
connection.channels.insert(id, sender).await;
|
||||
|
||||
Ok((id, connection.sender.clone(), receiever))
|
||||
}
|
||||
|
||||
pub async fn connect_container_exec(
|
||||
&self,
|
||||
container: String,
|
||||
shell: String,
|
||||
) -> anyhow::Result<(Uuid, Sender<Bytes>, Receiver<Bytes>)> {
|
||||
tracing::trace!(
|
||||
"request | type: ConnectContainerExec | container name: {container} | shell: {shell}",
|
||||
);
|
||||
|
||||
let connection =
|
||||
periphery_connections().get(&self.id).await.with_context(
|
||||
|| format!("No connection found for server {}", self.id),
|
||||
)?;
|
||||
|
||||
let id = self
|
||||
.request(ConnectContainerExec { container, shell })
|
||||
.await
|
||||
.context("Failed to create container exec connection")?;
|
||||
|
||||
let (sender, receiever) = channel(1000);
|
||||
connection.channels.insert(id, sender).await;
|
||||
|
||||
Ok((id, connection.sender.clone(), receiever))
|
||||
}
|
||||
|
||||
/// Executes command on specified terminal,
|
||||
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
|
||||
/// sentinal value as the expected final line of the stream.
|
||||
///
|
||||
/// Example final line:
|
||||
/// ```text
|
||||
/// __KOMODO_EXIT_CODE:0
|
||||
/// ```
|
||||
///
|
||||
/// This means the command exited with code 0 (success).
|
||||
///
|
||||
/// If this value is NOT the final item before stream closes, it means
|
||||
/// the terminal exited mid command, before giving status. Example: running `exit`.
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
pub async fn execute_terminal(
|
||||
&self,
|
||||
terminal: String,
|
||||
command: String,
|
||||
) -> anyhow::Result<
|
||||
impl Stream<Item = anyhow::Result<Bytes>> + 'static,
|
||||
> {
|
||||
tracing::trace!(
|
||||
"sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
|
||||
);
|
||||
|
||||
let connection =
|
||||
periphery_connections().get(&self.id).await.with_context(
|
||||
|| format!("No connection found for server {}", self.id),
|
||||
)?;
|
||||
|
||||
let id = self
|
||||
.request(ExecuteTerminal { terminal, command })
|
||||
.await
|
||||
.context("Failed to create execute terminal connection")?;
|
||||
|
||||
let (sender, receiver) = channel(1000);
|
||||
|
||||
connection.channels.insert(id, sender).await;
|
||||
|
||||
Ok(ReceiverStream {
|
||||
id,
|
||||
receiver,
|
||||
channels: connection.channels.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Executes command on specified container,
|
||||
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
|
||||
/// sentinal value as the expected final line of the stream.
|
||||
///
|
||||
/// Example final line:
|
||||
/// ```text
|
||||
/// __KOMODO_EXIT_CODE:0
|
||||
/// ```
|
||||
///
|
||||
/// This means the command exited with code 0 (success).
|
||||
///
|
||||
/// If this value is NOT the final item before stream closes, it means
|
||||
/// the container shell exited mid command, before giving status. Example: running `exit`.
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
pub async fn execute_container_exec(
|
||||
&self,
|
||||
container: String,
|
||||
shell: String,
|
||||
command: String,
|
||||
) -> anyhow::Result<ReceiverStream> {
|
||||
tracing::trace!(
|
||||
"sending request | type: ExecuteContainerExec | container: {container} | shell: {shell} | command: {command}",
|
||||
);
|
||||
|
||||
let connection =
|
||||
periphery_connections().get(&self.id).await.with_context(
|
||||
|| format!("No connection found for server {}", self.id),
|
||||
)?;
|
||||
|
||||
let id = self
|
||||
.request(ExecuteContainerExec {
|
||||
container,
|
||||
shell,
|
||||
command,
|
||||
})
|
||||
.await
|
||||
.context("Failed to create execute terminal connection")?;
|
||||
|
||||
let (sender, receiver) = channel(1000);
|
||||
|
||||
connection.channels.insert(id, sender).await;
|
||||
|
||||
Ok(ReceiverStream {
|
||||
id,
|
||||
receiver,
|
||||
channels: connection.channels.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ReceiverStream {
|
||||
id: Uuid,
|
||||
channels: Arc<CloneCache<Uuid, Sender<Bytes>>>,
|
||||
receiver: Receiver<Bytes>,
|
||||
}
|
||||
|
||||
impl Stream for ReceiverStream {
|
||||
type Item = anyhow::Result<Bytes>;
|
||||
fn poll_next(
|
||||
mut self: Pin<&mut Self>,
|
||||
cx: &mut task::Context<'_>,
|
||||
) -> Poll<Option<Self::Item>> {
|
||||
match self
|
||||
.receiver
|
||||
.poll_recv(cx)
|
||||
.map(|bytes| bytes.map(data_from_transport_bytes))
|
||||
{
|
||||
Poll::Ready(Some(Ok(bytes))) if bytes == END_OF_OUTPUT => {
|
||||
self.cleanup();
|
||||
Poll::Ready(None)
|
||||
}
|
||||
Poll::Ready(Some(Ok(bytes))) => Poll::Ready(Some(Ok(bytes))),
|
||||
Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
|
||||
Poll::Ready(None) => {
|
||||
self.cleanup();
|
||||
Poll::Ready(None)
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ReceiverStream {
|
||||
fn cleanup(&self) {
|
||||
// Not the prettiest but it should be fine
|
||||
let channels = self.channels.clone();
|
||||
let id = self.id;
|
||||
tokio::spawn(async move {
|
||||
channels.remove(&id).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -116,9 +116,11 @@ impl super::KomodoResource for Build {
|
||||
git_provider,
|
||||
repo,
|
||||
branch,
|
||||
image_registry_domain: optional_string(
|
||||
build.config.image_registry.domain,
|
||||
),
|
||||
image_registry_domain: build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.and_then(|r| optional_string(&r.domain)),
|
||||
built_hash: build.info.built_hash,
|
||||
latest_hash: build.info.latest_hash,
|
||||
state,
|
||||
|
||||
@@ -188,7 +188,7 @@ impl super::KomodoResource for Deployment {
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -267,7 +267,7 @@ impl super::KomodoResource for Deployment {
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
let periphery = match periphery_client(&server) {
|
||||
let periphery = match periphery_client(&server).await {
|
||||
Ok(periphery) => periphery,
|
||||
Err(e) => {
|
||||
// This case won't ever happen, as periphery_client only fallible if the server is disabled.
|
||||
|
||||
@@ -34,8 +34,10 @@ use komodo_client::{
|
||||
parsers::parse_string_list,
|
||||
};
|
||||
use partial_derive2::{Diff, MaybeNone, PartialDiff};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
api::{read::ReadArgs, write::WriteArgs},
|
||||
@@ -77,6 +79,7 @@ pub use refresh::{
|
||||
pub use repo::{
|
||||
refresh_repo_state_cache, spawn_repo_state_refresh_loop,
|
||||
};
|
||||
pub use server::{rotate_server_keys, update_server_public_key};
|
||||
|
||||
/// Implement on each Komodo resource for common methods
|
||||
pub trait KomodoResource {
|
||||
@@ -229,13 +232,19 @@ pub trait KomodoResource {
|
||||
pub async fn get<T: KomodoResource>(
|
||||
id_or_name: &str,
|
||||
) -> anyhow::Result<Resource<T::Config, T::Info>> {
|
||||
if id_or_name.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Cannot find {} with empty name / id",
|
||||
T::resource_type()
|
||||
));
|
||||
}
|
||||
T::coll()
|
||||
.find_one(id_or_name_filter(id_or_name))
|
||||
.await
|
||||
.context("failed to query db for resource")?
|
||||
.context("Failed to query db for resource")?
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"did not find any {} matching {id_or_name}",
|
||||
"Did not find any {} matching {id_or_name}",
|
||||
T::resource_type()
|
||||
)
|
||||
})
|
||||
@@ -388,7 +397,7 @@ pub async fn list_full_for_user_using_document<T: KomodoResource>(
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to pull {}s from mongo", T::resource_type())
|
||||
format!("Failed to pull {}s from mongo", T::resource_type())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -408,7 +417,7 @@ pub async fn get_id_to_resource_map<T: KomodoResource>(
|
||||
let res = find_collect(T::coll(), None, None)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to pull {}s from mongo", T::resource_type())
|
||||
format!("Failed to pull {}s from mongo", T::resource_type())
|
||||
})?
|
||||
.into_iter()
|
||||
.filter(|resource| {
|
||||
@@ -451,23 +460,33 @@ pub async fn get_id_to_resource_map<T: KomodoResource>(
|
||||
pub async fn create<T: KomodoResource>(
|
||||
name: &str,
|
||||
mut config: T::PartialConfig,
|
||||
info: Option<T::Info>,
|
||||
user: &User,
|
||||
) -> anyhow::Result<Resource<T::Config, T::Info>> {
|
||||
) -> serror::Result<Resource<T::Config, T::Info>> {
|
||||
if !T::user_can_create(user) {
|
||||
return Err(anyhow!(
|
||||
"User does not have permissions to create {}.",
|
||||
T::resource_type()
|
||||
));
|
||||
return Err(
|
||||
anyhow!(
|
||||
"User does not have permissions to create {}.",
|
||||
T::resource_type()
|
||||
)
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if name.is_empty() {
|
||||
return Err(anyhow!("Must provide non-empty name for resource."));
|
||||
return Err(
|
||||
anyhow!("Must provide non-empty name for resource")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let name = T::validated_name(name);
|
||||
|
||||
if ObjectId::from_str(&name).is_ok() {
|
||||
return Err(anyhow!("valid ObjectIds cannot be used as names."));
|
||||
return Err(
|
||||
anyhow!("Valid ObjectIds cannot be used as names")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
// Ensure an existing resource with same name doesn't already exist
|
||||
@@ -483,7 +502,10 @@ pub async fn create<T: KomodoResource>(
|
||||
.into_iter()
|
||||
.any(|r| r.name == name)
|
||||
{
|
||||
return Err(anyhow!("Must provide unique name for resource."));
|
||||
return Err(
|
||||
anyhow!("Resource with name '{}' already exists", name)
|
||||
.status_code(StatusCode::CONFLICT),
|
||||
);
|
||||
}
|
||||
|
||||
let start_ts = komodo_timestamp();
|
||||
@@ -497,7 +519,11 @@ pub async fn create<T: KomodoResource>(
|
||||
template: Default::default(),
|
||||
tags: Default::default(),
|
||||
config: config.into(),
|
||||
info: T::default_info().await?,
|
||||
info: if let Some(info) = info {
|
||||
info
|
||||
} else {
|
||||
T::default_info().await?
|
||||
},
|
||||
base_permission: PermissionLevel::None.into(),
|
||||
updated_at: start_ts,
|
||||
};
|
||||
@@ -506,11 +532,11 @@ pub async fn create<T: KomodoResource>(
|
||||
.insert_one(&resource)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to add {} to db", T::resource_type())
|
||||
format!("Failed to add {} to db", T::resource_type())
|
||||
})?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted_id is not ObjectId")?
|
||||
.context("Inserted_id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
let resource = get::<T>(&resource_id).await?;
|
||||
@@ -527,18 +553,18 @@ pub async fn create<T: KomodoResource>(
|
||||
let mut update = make_update(target, T::create_operation(), user);
|
||||
update.start_ts = start_ts;
|
||||
update.push_simple_log(
|
||||
&format!("create {}", T::resource_type()),
|
||||
&format!("Create {}", T::resource_type()),
|
||||
format!(
|
||||
"created {}\nid: {}\nname: {}",
|
||||
"Created {}\nid: {}\nname: {}",
|
||||
T::resource_type(),
|
||||
resource.id,
|
||||
resource.name
|
||||
),
|
||||
);
|
||||
update.push_simple_log(
|
||||
"config",
|
||||
"Config",
|
||||
serde_json::to_string_pretty(&resource.config)
|
||||
.context("failed to serialize resource config to JSON")?,
|
||||
.context("Failed to serialize resource config to JSON")?,
|
||||
);
|
||||
|
||||
T::post_create(&resource, &mut update).await?;
|
||||
@@ -734,7 +760,7 @@ pub async fn remove_tag_from_all<T: KomodoResource>(
|
||||
T::coll()
|
||||
.update_many(doc! {}, doc! { "$pull": { "tags": tag_id } })
|
||||
.await
|
||||
.context("failed to remove tag from resources")?;
|
||||
.context("Failed to remove tag from resources")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -847,9 +873,15 @@ pub async fn delete<T: KomodoResource>(
|
||||
);
|
||||
update.push_simple_log("Deleted Toml", toml);
|
||||
|
||||
if let Err(e) = T::post_delete(&resource, &mut update).await {
|
||||
update.push_error_log("post delete", format_serror(&e.into()));
|
||||
}
|
||||
tokio::join!(
|
||||
async {
|
||||
if let Err(e) = T::post_delete(&resource, &mut update).await {
|
||||
update
|
||||
.push_error_log("Post delete", format_serror(&e.into()));
|
||||
}
|
||||
},
|
||||
delete_from_alerters::<T>(&resource.id)
|
||||
);
|
||||
|
||||
refresh_all_resources_cache().await;
|
||||
|
||||
@@ -859,6 +891,26 @@ pub async fn delete<T: KomodoResource>(
|
||||
Ok(resource)
|
||||
}
|
||||
|
||||
async fn delete_from_alerters<T: KomodoResource>(id: &str) {
|
||||
let target_bson = doc! {
|
||||
"type": T::resource_type().as_ref(),
|
||||
"id": id,
|
||||
};
|
||||
if let Err(e) = db_client()
|
||||
.alerters
|
||||
.update_many(Document::new(), doc! {
|
||||
"$pull": {
|
||||
"config.resources": &target_bson,
|
||||
"config.except_resources": target_bson,
|
||||
}
|
||||
})
|
||||
.await
|
||||
.context("Failed to clear deleted resource from alerter whitelist / blacklist")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
}
|
||||
}
|
||||
|
||||
// =======
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
@@ -898,7 +950,7 @@ where
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to delete_many permissions matching target {target:?} | {e:#}"
|
||||
"Failed to delete_many permissions matching target {target:?} | {e:#}"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -933,7 +985,7 @@ where
|
||||
},
|
||||
)
|
||||
.await
|
||||
.context("failed to remove resource from users recently viewed")
|
||||
.context("Failed to remove resource from users recently viewed")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{Collection, bson::doc, options::FindOneOptions},
|
||||
};
|
||||
use futures::{TryStreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::Execution,
|
||||
entities::{
|
||||
@@ -709,6 +710,15 @@ async fn validate_config(
|
||||
.await?;
|
||||
params.stack = stack.id;
|
||||
}
|
||||
Execution::RunStackService(params) => {
|
||||
let stack = super::get_check_permissions::<Stack>(
|
||||
¶ms.stack,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
params.stack = stack.id;
|
||||
}
|
||||
Execution::BatchDestroyStack(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
@@ -725,6 +735,24 @@ async fn validate_config(
|
||||
.await?;
|
||||
params.alerter = alerter.id;
|
||||
}
|
||||
Execution::SendAlert(params) => {
|
||||
params.alerters = params
|
||||
.alerters
|
||||
.iter()
|
||||
.map(async |alerter| {
|
||||
let id = super::get_check_permissions::<Alerter>(
|
||||
alerter,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?
|
||||
.id;
|
||||
anyhow::Ok(id)
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
}
|
||||
Execution::ClearRepoCache(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
@@ -746,6 +774,13 @@ async fn validate_config(
|
||||
));
|
||||
}
|
||||
}
|
||||
Execution::RotateAllServerKeys(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Non admin user cannot trigger rotate all server keys"
|
||||
));
|
||||
}
|
||||
}
|
||||
Execution::Sleep(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -169,7 +169,7 @@ impl super::KomodoResource for Repo {
|
||||
}
|
||||
|
||||
let server = super::get::<Server>(&repo.config.server_id).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
match periphery
|
||||
.request(DeleteRepo {
|
||||
@@ -300,6 +300,7 @@ async fn get_repo_state_from_db(id: &str) -> RepoState {
|
||||
"$or": [
|
||||
{ "operation": "CloneRepo" },
|
||||
{ "operation": "PullRepo" },
|
||||
{ "operation": "BuildRepo" },
|
||||
],
|
||||
})
|
||||
.with_options(
|
||||
|
||||
@@ -1,30 +1,41 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use database::mungos::mongodb::{Collection, bson::doc};
|
||||
use database::mungos::mongodb::{
|
||||
Collection,
|
||||
bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use indexmap::IndexSet;
|
||||
use komodo_client::entities::{
|
||||
Operation, ResourceTarget, ResourceTargetVariant, komodo_timestamp,
|
||||
optional_string,
|
||||
permission::SpecificPermission,
|
||||
resource::Resource,
|
||||
server::{
|
||||
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
|
||||
ServerListItem, ServerListItemInfo, ServerQuerySpecifics,
|
||||
ServerInfo, ServerListItem, ServerListItemInfo,
|
||||
ServerQuerySpecifics,
|
||||
},
|
||||
update::Update,
|
||||
user::User,
|
||||
};
|
||||
use periphery_client::api;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_system_info,
|
||||
helpers::periphery_client,
|
||||
monitor::update_cache_for_server,
|
||||
state::{action_states, db_client, server_status_cache},
|
||||
state::{
|
||||
action_states, db_client, periphery_connections,
|
||||
server_status_cache,
|
||||
},
|
||||
};
|
||||
|
||||
impl super::KomodoResource for Server {
|
||||
type Config = ServerConfig;
|
||||
type PartialConfig = PartialServerConfig;
|
||||
type ConfigDiff = ServerConfigDiff;
|
||||
type Info = ();
|
||||
type Info = ServerInfo;
|
||||
type ListItem = ServerListItem;
|
||||
type QuerySpecifics = ServerQuerySpecifics;
|
||||
|
||||
@@ -57,11 +68,21 @@ impl super::KomodoResource for Server {
|
||||
server: Resource<Self::Config, Self::Info>,
|
||||
) -> Self::ListItem {
|
||||
let status = server_status_cache().get(&server.id).await;
|
||||
let (terminals_disabled, container_exec_disabled) =
|
||||
get_system_info(&server)
|
||||
.await
|
||||
.map(|i| (i.terminals_disabled, i.container_exec_disabled))
|
||||
.unwrap_or((true, true));
|
||||
let (
|
||||
version,
|
||||
public_key,
|
||||
terminals_disabled,
|
||||
container_exec_disabled,
|
||||
) = match status.as_ref().and_then(|s| s.periphery_info.as_ref())
|
||||
{
|
||||
Some(info) => (
|
||||
Some(info.version.clone()),
|
||||
Some(info.public_key.clone()),
|
||||
info.terminals_disabled,
|
||||
info.container_exec_disabled,
|
||||
),
|
||||
None => (None, None, true, true),
|
||||
};
|
||||
ServerListItem {
|
||||
name: server.name,
|
||||
id: server.id,
|
||||
@@ -70,18 +91,25 @@ impl super::KomodoResource for Server {
|
||||
resource_type: ResourceTargetVariant::Server,
|
||||
info: ServerListItemInfo {
|
||||
state: status.as_ref().map(|s| s.state).unwrap_or_default(),
|
||||
version: status
|
||||
.map(|s| s.version.clone())
|
||||
.unwrap_or(String::from("Unknown")),
|
||||
region: server.config.region,
|
||||
address: server.config.address,
|
||||
external_address: server.config.external_address,
|
||||
address: optional_string(server.config.address),
|
||||
external_address: optional_string(
|
||||
server.config.external_address,
|
||||
),
|
||||
send_unreachable_alerts: server
|
||||
.config
|
||||
.send_unreachable_alerts,
|
||||
send_cpu_alerts: server.config.send_cpu_alerts,
|
||||
send_mem_alerts: server.config.send_mem_alerts,
|
||||
send_disk_alerts: server.config.send_disk_alerts,
|
||||
send_version_mismatch_alerts: server
|
||||
.config
|
||||
.send_version_mismatch_alerts,
|
||||
version,
|
||||
public_key,
|
||||
attempted_public_key: optional_string(
|
||||
server.info.attempted_public_key,
|
||||
),
|
||||
terminals_disabled,
|
||||
container_exec_disabled,
|
||||
},
|
||||
@@ -120,7 +148,7 @@ impl super::KomodoResource for Server {
|
||||
created: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
update_cache_for_server(created).await;
|
||||
update_cache_for_server(created, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -142,7 +170,7 @@ impl super::KomodoResource for Server {
|
||||
updated: &Self,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
update_cache_for_server(updated).await;
|
||||
update_cache_for_server(updated, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -209,6 +237,14 @@ impl super::KomodoResource for Server {
|
||||
.await
|
||||
.context("failed to close deleted server alerts")?;
|
||||
|
||||
let _ = db_client()
|
||||
.onboarding_keys
|
||||
.update_many(
|
||||
doc! { "onboarded": &id },
|
||||
doc! { "$pull": { "onboarded": &id } },
|
||||
)
|
||||
.await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -216,7 +252,41 @@ impl super::KomodoResource for Server {
|
||||
resource: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
server_status_cache().remove(&resource.id).await;
|
||||
tokio::join!(
|
||||
server_status_cache().remove(&resource.id),
|
||||
periphery_connections().remove(&resource.id),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn update_server_public_key(
|
||||
server_id: &str,
|
||||
public_key: &str,
|
||||
) -> anyhow::Result<()> {
|
||||
db_client()
|
||||
.servers
|
||||
.update_one(
|
||||
doc! { "_id": ObjectId::from_str(server_id)? },
|
||||
doc! { "$set": { "info.public_key": public_key } },
|
||||
)
|
||||
.await
|
||||
.context("Failed to update Server public key on database")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Rotates Periphery keys and updates
|
||||
/// `server.info.public_key` to match new public key.
|
||||
/// Does so without making a specific update.
|
||||
pub async fn rotate_server_keys(
|
||||
server: &Server,
|
||||
) -> anyhow::Result<()> {
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let public_key = periphery
|
||||
.request(api::keys::RotatePrivateKey {})
|
||||
.await
|
||||
.context("Failed to rotate Periphery private key")?
|
||||
.public_key;
|
||||
update_server_public_key(&server.id, &public_key).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -252,7 +252,7 @@ impl super::KomodoResource for Stack {
|
||||
else {
|
||||
return Ok(());
|
||||
};
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -327,7 +327,7 @@ impl super::KomodoResource for Stack {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let periphery = match periphery_client(&server) {
|
||||
let periphery = match periphery_client(&server).await {
|
||||
Ok(periphery) => periphery,
|
||||
Err(e) => {
|
||||
// This case won't ever happen, as periphery_client only fallible if the server is disabled.
|
||||
|
||||
@@ -113,7 +113,7 @@ impl super::KomodoResource for ResourceSync {
|
||||
|
||||
async fn busy(id: &String) -> anyhow::Result<bool> {
|
||||
action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -242,7 +242,7 @@ async fn get_resource_sync_state(
|
||||
data: &ResourceSyncInfo,
|
||||
) -> ResourceSyncState {
|
||||
if let Some(state) = action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(id)
|
||||
.await
|
||||
.and_then(|s| {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user