Dockerized periphery (#34)

* Add webhooks page to docs

* supports

* supports

* periphery Dockerfile

* add comments. Remove unneeded default config

* add FILE SYSTEM log

* remove log

* filter disks included in periphery disk report, on periphery side

* dockerized periphery

* all in one compose file docs

* remove some unused deps
This commit is contained in:
Maxwell Becker
2024-08-17 03:25:42 -04:00
committed by GitHub
parent a89bd4a36d
commit 1f2d236228
29 changed files with 341 additions and 381 deletions

2
.cargo/config.toml Normal file
View File

@@ -0,0 +1,2 @@
[build]
rustflags = ["-Wunused-crate-dependencies"]

33
Cargo.lock generated
View File

@@ -2254,12 +2254,8 @@ dependencies = [
"futures",
"merge_config_files",
"monitor_client",
"partial_derive2",
"serde",
"serde_json",
"strum 0.26.3",
"tokio",
"toml",
"tracing",
"tracing-subscriber",
]
@@ -2326,7 +2322,6 @@ dependencies = [
"nom_pem",
"octorust",
"ordered_hash_map",
"parse_csl",
"partial_derive2",
"periphery_client",
"rand",
@@ -2340,13 +2335,11 @@ dependencies = [
"serror",
"sha2",
"slack_client_rs",
"strum 0.26.3",
"svi",
"tokio",
"tokio-util",
"toml",
"toml_pretty",
"tower",
"tower-http",
"tracing",
"typeshare",
@@ -2373,13 +2366,11 @@ dependencies = [
"logger",
"merge_config_files",
"monitor_client",
"parse_csl",
"periphery_client",
"resolver_api",
"run_command",
"serde",
"serde_json",
"serde_yaml",
"serror",
"svi",
"sysinfo",
@@ -2725,12 +2716,6 @@ dependencies = [
"windows-targets 0.52.5",
]
[[package]]
name = "parse_csl"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffa94c2e5674923c67d7f3dfce1279507b191e10eb064881b46ed3e1256e5ca6"
[[package]]
name = "parse_link_header"
version = "0.3.3"
@@ -3938,24 +3923,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "tests"
version = "1.13.1"
dependencies = [
"anyhow",
"dotenvy",
"envy",
"logger",
"monitor_client",
"mungos",
"partial_derive2",
"rand",
"serde",
"serde_json",
"tokio",
"tracing",
]
[[package]]
name = "thiserror"
version = "1.0.63"

View File

@@ -35,7 +35,6 @@ derive_variants = "1.0.0"
mongo_indexed = "2.0.0"
resolver_api = "1.1.1"
toml_pretty = "1.1.2"
parse_csl = "0.1.0"
mungos = "1.0.1"
svi = "1.0.1"
@@ -49,7 +48,6 @@ futures-util = "0.3.30"
# SERVER
axum = { version = "0.7.5", features = ["ws", "json"] }
axum-extra = { version = "0.9.3", features = ["typed-header"] }
tower = { version = "0.4.13", features = ["timeout"] }
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
tokio-tungstenite = "0.23.1"

View File

@@ -17,18 +17,13 @@ path = "src/main.rs"
[dependencies]
# local
monitor_client.workspace = true
# mogh
partial_derive2.workspace = true
# external
tracing-subscriber.workspace = true
merge_config_files.workspace = true
serde_json.workspace = true
futures.workspace = true
tracing.workspace = true
colored.workspace = true
anyhow.workspace = true
tokio.workspace = true
serde.workspace = true
strum.workspace = true
toml.workspace = true
clap.workspace = true

View File

@@ -30,7 +30,6 @@ mongo_indexed.workspace = true
resolver_api.workspace = true
toml_pretty.workspace = true
run_command.workspace = true
parse_csl.workspace = true
mungos.workspace = true
slack.workspace = true
svi.workspace = true
@@ -56,9 +55,7 @@ dotenvy.workspace = true
bcrypt.workspace = true
base64.workspace = true
tokio.workspace = true
tower.workspace = true
serde.workspace = true
strum.workspace = true
regex.workspace = true
axum.workspace = true
toml.workspace = true

View File

@@ -33,7 +33,7 @@ EXPOSE 9000
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
LABEL org.opencontainers.image.description="A tool to build and deploy software across many servers"
LABEL org.opencontainers.image.description="Monitor Core"
LABEL org.opencontainers.image.licenses=GPL-3.0
CMD ["./core"]

View File

@@ -77,6 +77,7 @@ pub fn core_config() -> &'static CoreConfig {
host: env.monitor_host.unwrap_or(config.host),
port: env.monitor_port.unwrap_or(config.port),
passkey: env.monitor_passkey.unwrap_or(config.passkey),
ensure_server: env.monitor_ensure_server.unwrap_or(config.ensure_server),
jwt_secret: env.monitor_jwt_secret.unwrap_or(config.jwt_secret),
jwt_ttl: env
.monitor_jwt_ttl

View File

@@ -3,14 +3,17 @@ use std::{collections::HashSet, str::FromStr, time::Duration};
use anyhow::{anyhow, Context};
use futures::future::join_all;
use mongo_indexed::Document;
use monitor_client::entities::{
monitor_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::Server,
sync::ResourceSync,
update::{Log, ResourceTarget, Update},
user::User,
EnvironmentVar,
use monitor_client::{
api::write::CreateServer,
entities::{
monitor_timestamp,
permission::{Permission, PermissionLevel, UserTarget},
server::{PartialServerConfig, Server},
sync::ResourceSync,
update::{Log, ResourceTarget, Update},
user::{system_user, User},
EnvironmentVar,
},
};
use mungos::{
find::find_collect,
@@ -19,8 +22,13 @@ use mungos::{
use periphery_client::PeripheryClient;
use query::get_global_variables;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
use resolver_api::Resolve;
use crate::{config::core_config, resource, state::db_client};
use crate::{
config::core_config,
resource,
state::{db_client, State},
};
pub mod action_state;
pub mod alert;
@@ -375,3 +383,38 @@ async fn startup_open_alert_cleanup() {
)
}
}
/// Ensures a default server exists with the defined address
/// (`core_config().ensure_server`). Used by the All In One compose
/// deployment to auto-register the bundled Periphery container as a
/// server named "default" on first startup.
/// No-op when `ensure_server` is empty or a server with that address
/// already exists. All failures are logged and swallowed so Core
/// startup is never blocked by this best-effort initialization.
pub async fn ensure_server() {
let ensure_server = &core_config().ensure_server;
// Feature disabled: config value unset.
if ensure_server.is_empty() {
return;
}
let db = db_client().await;
// Check whether a server is already registered at this address.
let Ok(server) = db
.servers
.find_one(doc! { "config.address": ensure_server })
.await
.inspect_err(|e| error!("Failed to initialize 'ensure_server'. Failed to query db. {e:?}"))
else {
// DB query failed (already logged above); bail out without crashing startup.
return;
};
if server.is_some() {
// Server already exists; nothing to do.
return;
}
// Create the "default" server as the System service user,
// with all other config fields left at their defaults.
if let Err(e) = State
.resolve(
CreateServer {
name: String::from("default"),
config: PartialServerConfig {
address: Some(ensure_server.to_string()),
..Default::default()
},
},
system_user().to_owned(),
)
.await
{
error!("Failed to initialize 'ensure_server'. Failed to CreateServer. {e:?}");
}
}

View File

@@ -33,6 +33,8 @@ async fn app() -> anyhow::Result<()> {
// includes init db_client check to crash on db init failure
helpers::startup_cleanup().await;
// Maybe initialize default server in All In One deployment.
helpers::ensure_server().await;
// init jwt client to crash on failure
state::jwt_client();

View File

@@ -27,12 +27,10 @@ merge_config_files.workspace = true
async_timing_util.workspace = true
resolver_api.workspace = true
run_command.workspace = true
parse_csl.workspace = true
svi.workspace = true
# external
axum-extra.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
futures.workspace = true
tracing.workspace = true
bollard.workspace = true

26
bin/periphery/Dockerfile Normal file
View File

@@ -0,0 +1,26 @@
# Build Periphery
FROM rust:1.80.1-bookworm AS builder
WORKDIR /builder
COPY . .
RUN cargo build -p monitor_periphery --release
# Final Image
FROM debian:bookworm-slim
# Install Deps
RUN apt update && apt install -y git curl ca-certificates && \
curl -fsSL https://get.docker.com | sh
# Copy
COPY --from=builder /builder/target/release/periphery /
# Hint at the port
EXPOSE 8120
# Label for Ghcr
LABEL org.opencontainers.image.source=https://github.com/mbecker20/monitor
LABEL org.opencontainers.image.description="Monitor Periphery"
LABEL org.opencontainers.image.licenses=GPL-3.0
# Using ENTRYPOINT allows cli args to be passed, eg using "command" in docker compose.
ENTRYPOINT [ "./periphery" ]

View File

@@ -15,53 +15,56 @@ pub fn periphery_config() -> &'static PeripheryConfig {
.expect("failed to parse periphery environment");
let args = CliArgs::parse();
let config_paths =
args.config_path.unwrap_or(env.monitor_config_paths);
args.config_path.unwrap_or(env.periphery_config_paths);
let config = if config_paths.is_empty() {
PeripheryConfig::default()
} else {
parse_config_paths::<PeripheryConfig>(
config_paths,
args.config_keyword.unwrap_or(env.monitor_config_keywords),
args.config_keyword.unwrap_or(env.periphery_config_keywords),
args
.merge_nested_config
.unwrap_or(env.monitor_merge_nested_config),
.unwrap_or(env.periphery_merge_nested_config),
args
.extend_config_arrays
.unwrap_or(env.monitor_extend_config_arrays),
.unwrap_or(env.periphery_extend_config_arrays),
)
.expect("failed at parsing config from paths")
};
PeripheryConfig {
port: env.monitor_port.unwrap_or(config.port),
repo_dir: env.monitor_repo_dir.unwrap_or(config.repo_dir),
stack_dir: env.monitor_stack_dir.unwrap_or(config.stack_dir),
port: env.periphery_port.unwrap_or(config.port),
repo_dir: env.periphery_repo_dir.unwrap_or(config.repo_dir),
stack_dir: env.periphery_stack_dir.unwrap_or(config.stack_dir),
stats_polling_rate: env
.monitor_stats_polling_rate
.periphery_stats_polling_rate
.unwrap_or(config.stats_polling_rate),
legacy_compose_cli: env
.monitor_legacy_compose_cli
.periphery_legacy_compose_cli
.unwrap_or(config.legacy_compose_cli),
logging: LogConfig {
level: args
.log_level
.map(LogLevel::from)
.or(env.monitor_logging_level)
.or(env.periphery_logging_level)
.unwrap_or(config.logging.level),
stdio: env
.monitor_logging_stdio
.periphery_logging_stdio
.unwrap_or(config.logging.stdio),
otlp_endpoint: env
.monitor_logging_otlp_endpoint
.periphery_logging_otlp_endpoint
.or(config.logging.otlp_endpoint),
opentelemetry_service_name: env
.monitor_logging_opentelemetry_service_name
.periphery_logging_opentelemetry_service_name
.unwrap_or(config.logging.opentelemetry_service_name),
},
allowed_ips: env
.monitor_allowed_ips
.periphery_allowed_ips
.unwrap_or(config.allowed_ips),
passkeys: env.monitor_passkeys.unwrap_or(config.passkeys),
passkeys: env.periphery_passkeys.unwrap_or(config.passkeys),
include_disk_mounts: env
.periphery_include_disk_mounts
.unwrap_or(config.include_disk_mounts),
secrets: config.secrets,
git_providers: config.git_providers,
docker_registries: config.docker_registries,

View File

@@ -101,11 +101,26 @@ impl StatsClient {
}
fn get_disks(&self) -> Vec<SingleDiskUsage> {
let config = periphery_config();
self
.disks
.list()
.iter()
.filter(|d| d.file_system() != "overlay")
.filter(|d| {
if d.file_system() != "overlay" {
return false;
}
if config.include_disk_mounts.is_empty() {
return true;
}
let path = d.mount_point();
for mount in &config.include_disk_mounts {
if path.starts_with(mount) {
return true;
}
}
false
})
.map(|disk| {
let file_system =
disk.file_system().to_string_lossy().to_string();

View File

@@ -1,20 +0,0 @@
[package]
name = "tests"
version.workspace = true
edition.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
monitor_client.workspace = true
logger.workspace = true
tokio.workspace = true
anyhow.workspace = true
serde.workspace = true
serde_json.workspace = true
partial_derive2.workspace = true
mungos.workspace = true
dotenvy.workspace = true
envy.workspace = true
rand.workspace = true
tracing.workspace = true

View File

@@ -1,120 +0,0 @@
use monitor_client::{
api::write::{
CreateBuild, CreateBuilder, CreateDeployment, CreateServer,
UpdateTagsOnResource,
},
entities::{
build::BuildConfig,
builder::{PartialBuilderConfig, ServerBuilderConfig},
deployment::DeploymentConfig,
server::ServerConfig,
},
MonitorClient,
};
use rand::Rng;
use crate::random_string;
#[allow(unused)]
pub async fn tests() -> anyhow::Result<()> {
dotenvy::dotenv().ok();
let monitor = MonitorClient::new_from_env().await?;
let tags = (0..6).map(|_| random_string(5)).collect::<Vec<_>>();
let mut rng = rand::thread_rng();
let mut get_tags = || vec![tags[rng.gen_range(0..6)].to_string()];
let server_names = (0..20)
.map(|i| format!("server-{}-{}", random_string(8), i))
.collect::<Vec<_>>();
for name in &server_names {
let resource = monitor
.write(CreateServer {
name: name.to_string(),
config: ServerConfig::builder()
.address(String::new())
.build()?
.into(),
})
.await?;
info!("created server {}", resource.name);
monitor
.write(UpdateTagsOnResource {
target: (&resource).into(),
tags: get_tags(),
})
.await?;
info!("updated tags on server {}", resource.name);
}
for (i, server_name) in server_names.iter().enumerate() {
let resource = monitor
.write(CreateDeployment {
name: format!("dep-{}-{}", random_string(8), i),
config: DeploymentConfig::builder()
.server_id(server_name.to_string())
.build()?
.into(),
})
.await?;
info!("created deployment {}", resource.name);
monitor
.write(UpdateTagsOnResource {
target: (&resource).into(),
tags: get_tags(),
})
.await?;
info!("updated tags on deployment {}", resource.name);
}
let builder_names = (0..20)
.map(|i| format!("builder-{}-{}", random_string(8), i))
.collect::<Vec<_>>();
for (i, server_name) in server_names.iter().enumerate() {
let resource = monitor
.write(CreateBuilder {
name: builder_names[i].clone(),
config: PartialBuilderConfig::Server(
ServerBuilderConfig {
server_id: server_name.to_string(),
}
.into(),
),
})
.await?;
info!("created builder {}", resource.name);
monitor
.write(UpdateTagsOnResource {
target: (&resource).into(),
tags: get_tags(),
})
.await?;
info!("updated tags on builder {}", resource.name);
}
for (i, builder_name) in builder_names.iter().enumerate() {
let resource = monitor
.write(CreateBuild {
name: format!("build-{}-{}", random_string(8), i),
config: BuildConfig::builder()
.builder_id(builder_name.to_string())
.build()?
.into(),
})
.await?;
info!("created build {}", resource.name);
monitor
.write(UpdateTagsOnResource {
target: (&resource).into(),
tags: get_tags(),
})
.await?;
info!("updated tags on build {}", resource.name);
}
Ok(())
}

View File

@@ -1,24 +0,0 @@
#[macro_use]
extern crate tracing;
use rand::{distributions::Alphanumeric, thread_rng, Rng};
mod core;
// mod periphery;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
logger::init(&Default::default())?;
// periphery::tests().await?;
core::tests().await?;
Ok(())
}
/// Generates a random alphanumeric string of `length` characters,
/// sampled from the thread-local RNG.
fn random_string(length: usize) -> String {
thread_rng()
.sample_iter(&Alphanumeric)
.take(length)
.map(char::from)
.collect()
}

View File

@@ -1,53 +0,0 @@
// use periphery_client::{requests, PeripheryClient};
#[allow(unused)]
pub async fn tests() -> anyhow::Result<()> {
let periphery =
PeripheryClient::new("http://localhost:9001", "monitor_passkey");
let version = periphery.request(requests::GetVersion {}).await?;
println!("{version:?}");
let system_info =
periphery.request(requests::GetSystemInformation {}).await?;
println!("{system_info:#?}");
let processes =
periphery.request(requests::GetSystemProcesses {}).await?;
// println!("{system_stats:#?}");
let periphery_process =
processes.into_iter().find(|p| p.name.contains("periphery"));
println!("{periphery_process:#?}");
let accounts = periphery.request(requests::GetAccounts {}).await?;
println!("{accounts:#?}");
let secrets = periphery.request(requests::GetSecrets {}).await?;
println!("{secrets:#?}");
let container_stats = periphery
.request(requests::GetContainerStatsList {})
.await?;
println!("{container_stats:#?}");
let res = periphery.request(requests::GetNetworkList {}).await?;
println!("{res:#?}");
let res = periphery
.request(requests::GetContainerStats {
name: "monitor-mongo".into(),
})
.await?;
println!("{res:#?}");
let res = periphery
.request(requests::GetContainerLog {
name: "monitor-mongo".into(),
tail: 50,
})
.await?;
println!("{res:#?}");
Ok(())
}

View File

@@ -45,6 +45,8 @@ pub struct Env {
pub monitor_port: Option<u16>,
/// Override `passkey`
pub monitor_passkey: Option<String>,
/// Override `ensure_server`
pub monitor_ensure_server: Option<String>,
/// Override `jwt_secret`
pub monitor_jwt_secret: Option<String>,
/// Override `jwt_ttl`
@@ -184,6 +186,12 @@ pub struct CoreConfig {
#[serde(default)]
pub ui_write_disabled: bool,
/// If defined, ensure an enabled server exists at this address.
/// Use with All In One compose.
/// Example: `http://monitor-periphery:8120`
#[serde(default)]
pub ensure_server: String,
// ============
// = Database =
// ============
@@ -395,6 +403,7 @@ impl CoreConfig {
host: config.host,
port: config.port,
passkey: empty_or_redacted(&config.passkey),
ensure_server: config.ensure_server,
jwt_secret: empty_or_redacted(&config.jwt_secret),
jwt_ttl: config.jwt_ttl,
repo_directory: config.repo_directory,

View File

@@ -86,7 +86,7 @@ pub struct Env {
///
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
#[serde(default)]
pub monitor_config_paths: Vec<String>,
pub periphery_config_paths: Vec<String>,
/// If specifying folders, use this to narrow down which
/// files will be matched to parse into the final [PeripheryConfig].
/// Only files inside the folders which have names containing all keywords
@@ -94,120 +94,54 @@ pub struct Env {
///
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
#[serde(default)]
pub monitor_config_keywords: Vec<String>,
pub periphery_config_keywords: Vec<String>,
/// Will merge nested config object (eg. secrets, providers) across multiple
/// config files. Default: `false`
///
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
#[serde(default)]
pub monitor_merge_nested_config: bool,
pub periphery_merge_nested_config: bool,
/// Will extend config arrays (eg. `allowed_ips`, `passkeys`) across multiple config files.
/// Default: `false`
///
/// Note. This is overridden if the equivalent arg is passed in [CliArgs].
#[serde(default)]
pub monitor_extend_config_arrays: bool,
pub periphery_extend_config_arrays: bool,
/// Override `port`
pub monitor_port: Option<u16>,
pub periphery_port: Option<u16>,
/// Override `repo_dir`
pub monitor_repo_dir: Option<PathBuf>,
pub periphery_repo_dir: Option<PathBuf>,
/// Override `stack_dir`
pub monitor_stack_dir: Option<PathBuf>,
pub periphery_stack_dir: Option<PathBuf>,
/// Override `stats_polling_rate`
pub monitor_stats_polling_rate: Option<Timelength>,
pub periphery_stats_polling_rate: Option<Timelength>,
/// Override `legacy_compose_cli`
pub monitor_legacy_compose_cli: Option<bool>,
pub periphery_legacy_compose_cli: Option<bool>,
// LOGGING
/// Override `logging.level`
pub monitor_logging_level: Option<LogLevel>,
pub periphery_logging_level: Option<LogLevel>,
/// Override `logging.stdio`
pub monitor_logging_stdio: Option<StdioLogMode>,
pub periphery_logging_stdio: Option<StdioLogMode>,
/// Override `logging.otlp_endpoint`
pub monitor_logging_otlp_endpoint: Option<String>,
pub periphery_logging_otlp_endpoint: Option<String>,
/// Override `logging.opentelemetry_service_name`
pub monitor_logging_opentelemetry_service_name: Option<String>,
pub periphery_logging_opentelemetry_service_name: Option<String>,
/// Override `allowed_ips`
pub monitor_allowed_ips: Option<Vec<IpAddr>>,
pub periphery_allowed_ips: Option<Vec<IpAddr>>,
/// Override `passkeys`
pub monitor_passkeys: Option<Vec<String>>,
pub periphery_passkeys: Option<Vec<String>>,
/// Override `include_disk_mounts`
pub periphery_include_disk_mounts: Option<Vec<String>>,
}
/// # Periphery Configuration File
///
/// The periphery agent initializes its configuration by reading the environment,
/// parsing the [PeripheryConfig] schema from the files specified by cli args (and falling back to `env.config_paths`),
/// and then applying any config field overrides specified in the environment.
///
/// ## Example TOML
/// ```toml
/// ## optional. 8120 is default
/// port = 8120
///
/// ## optional. `/etc/monitor/repos` is default.
/// repo_dir = "/etc/monitor/repos"
///
/// ## optional. `/etc/monitor/stacks` is default.
/// stack_dir = "/etc/monitor/stacks"
///
/// ## optional. 5-sec is default.
/// ## can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min.
/// ## controls granularity of system stats recorded
/// stats_polling_rate = "5-sec"
///
/// ## Whether stack actions should use `docker-compose ...`
/// ## instead of `docker compose ...`.
/// ## default: false
/// legacy_compose_cli = false
///
/// ## optional. default is empty, which will not block any request by ip.
/// allowed_ips = ["127.0.0.1"]
///
/// ## optional. default is empty, which will not require any passkey to be passed by core.
/// passkeys = ["abcdefghijk"]
///
/// ## specify the log level of the monitor core application
/// ## default: info
/// ## options: off, error, warn, info, debug, trace
/// logging.level = "info"
///
/// ## specify the logging format for stdout / stderr.
/// ## default: standard
/// ## options: standard, json, none
/// logging.stdio = "standard"
///
/// ## specify an otlp endpoint to send traces to
/// ## optional, default unassigned
/// # logging.otlp_endpoint = "http://localhost:4317"
///
/// ## specify the service name to send with otlp traces.
/// ## optional, default 'Monitor'.
/// # logging.opentelemetry_service_name = "Monitor"
///
/// ## configure perihery-based secrets
/// [secrets]
/// # SECRET_1 = "value_1"
/// # SECRET_2 = "value_2"
///
/// ## configure periphery-based git providers
/// # [[git_provider]]
/// # domain = "git.mogh.tech" # use a custom provider, like self-hosted gitea
/// # accounts = [
/// # { username = "mbecker20", token = "access_token_for_account" },
/// # ]
///
/// ## configure periphery-based docker registries
/// # [[docker_registry]]
/// # domain = "docker.io"
/// # accounts = [
/// # { username = "mbecker2020", token = "access_token_for_account" }
/// # ]
/// # organizations = ["DockerhubOrganization"]
/// ```
/// Refer to the [example file](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml) for a full example.
#[derive(Debug, Clone, Deserialize)]
pub struct PeripheryConfig {
/// The port periphery will run on.
@@ -254,6 +188,10 @@ pub struct PeripheryConfig {
#[serde(default)]
pub passkeys: Vec<String>,
/// If non-empty, only includes specific mount paths in the disk report.
#[serde(default)]
pub include_disk_mounts: Vec<String>,
/// Mapping on local periphery secrets. These can be interpolated into eg. Deployment environment variables.
/// Default: none
#[serde(default)]
@@ -297,6 +235,7 @@ impl Default for PeripheryConfig {
logging: Default::default(),
allowed_ips: Default::default(),
passkeys: Default::default(),
include_disk_mounts: Default::default(),
secrets: Default::default(),
git_providers: Default::default(),
docker_registries: Default::default(),

View File

@@ -81,7 +81,8 @@ impl User {
pub fn is_service_user(user_id: &str) -> bool {
matches!(
user_id,
"Procedure"
"System"
| "Procedure"
| "Github" // Github can be removed later, just keeping for backward compat.
| "Git Webhook"
| "Auto Redeploy"
@@ -95,6 +96,7 @@ impl User {
pub fn admin_service_user(user_id: &str) -> Option<User> {
match user_id {
"System" => system_user().to_owned().into(),
"Procedure" => procedure_user().to_owned().into(),
// Github should be removed later, replaced by Git Webhook, just keeping for backward compat.
"Github" => git_webhook_user().to_owned().into(),
@@ -108,6 +110,19 @@ pub fn admin_service_user(user_id: &str) -> Option<User> {
}
}
/// Returns the lazily-initialized static "System" service user.
/// Both `id` and `username` are "System", and the user is an admin;
/// all other fields are left at their defaults.
pub fn system_user() -> &'static User {
// OnceLock guarantees thread-safe, one-time initialization.
static SYSTEM_USER: OnceLock<User> = OnceLock::new();
SYSTEM_USER.get_or_init(|| {
let id_name = String::from("System");
User {
id: id_name.clone(),
username: id_name,
admin: true,
..Default::default()
}
})
}
pub fn procedure_user() -> &'static User {
static PROCEDURE_USER: OnceLock<User> = OnceLock::new();
PROCEDURE_USER.get_or_init(|| {

View File

@@ -0,0 +1,78 @@
######################
# ALL IN ONE COMPOSE #
######################
## This compose file will bring up both Core and Periphery in containers.
## A "default" server pointing to the local Periphery will be waiting in the UI on first startup.
services:
monitor-core:
image: ghcr.io/mbecker20/monitor:latest ## use ghcr.io/mbecker20/monitor:latest-aarch64 for arm support
restart: unless-stopped
depends_on:
- monitor-mongo
logging:
driver: local # enable log rotation by default. see `https://docs.docker.com/config/containers/logging/local/`
networks:
- monitor-network
ports:
- 9120:9120
environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml
MONITOR_HOST: https://demo.monitor.dev # CHANGEME
MONITOR_TITLE: Monitor # Change the app title, displayed in the browser tab.
MONITOR_ENSURE_SERVER: http://monitor-periphery:8120 # Creates the "default" server.
## MONGO
MONITOR_MONGO_ADDRESS: monitor-mongo:27017
MONITOR_MONGO_USERNAME: admin # match the db credentials below
MONITOR_MONGO_PASSWORD: admin
## KEYS
MONITOR_PASSKEY: a_random_passkey # used to auth against periphery
MONITOR_WEBHOOK_SECRET: a_random_secret # used to authenticate incoming webhooks
MONITOR_JWT_SECRET: a_random_jwt_secret # Optional. If empty, will have to log in again on restart.
## AUTH
MONITOR_LOCAL_AUTH: true # the default is false.
# MONITOR_GITHUB_OAUTH_ENABLED: true # also support google oauth
# MONITOR_GITHUB_OAUTH_ID: your_oauth_id
# MONITOR_GITHUB_OAUTH_SECRET: your_oauth_secret
## AWS
# MONITOR_AWS_ACCESS_KEY_ID: your_aws_key_id
# MONITOR_AWS_SECRET_ACCESS_KEY: your_secret_access_key
## HETZNER
# MONITOR_HETZNER_TOKEN: your_hetzner_token
## Deploy periphery container using this block,
## or deploy it on the host directly using https://github.com/mbecker20/monitor/tree/main/scripts
monitor-periphery:
image: ghcr.io/mbecker20/periphery:latest
logging:
driver: local
networks:
- monitor-network
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- monitor-repos:/etc/monitor/repos # manage repos in a docker volume, or change it to an accessible host directory.
environment:
# PERIPHERY_INCLUDE_DISK_MOUNTS: /etc/monitor/repos # If the disk size is overreporting, only specific mounts.
monitor-mongo:
image: mongo
command: --quiet # suppress mongo logs a bit
restart: unless-stopped
logging:
driver: local
networks:
- monitor-network
ports:
- 27017:27017
volumes:
- db-data:/data/db
environment:
MONGO_INITDB_ROOT_USERNAME: admin # change these
MONGO_INITDB_ROOT_PASSWORD: admin
volumes:
db-data:
monitor-repos:
networks:
monitor-network: {}

View File

@@ -14,6 +14,7 @@ services:
- host.docker.internal:host-gateway
environment: # https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml
MONITOR_HOST: https://demo.monitor.dev
MONITOR_TITLE: Monitor # Change the app title, displayed in the browser tab.
## MONGO
MONITOR_MONGO_ADDRESS: monitor-mongo:27017
MONITOR_MONGO_USERNAME: admin # match ones below

View File

@@ -36,6 +36,12 @@ host = "https://monitor.dev"
## Required to start Monitor, no default
passkey = "a_random_passkey"
## Ensure a server with this address exists on Core
## upon first startup. Used with AIO compose.
## Optional, no default.
## Env: MONITOR_ENSURE_SERVER
# ensure_server = "http://monitor-periphery:8120"
## Disables write support on resources in the UI.
## This protects users that would normally have write privileges during their UI usage,
## when they intend to fully rely on ResourceSyncs to manage config.

View File

@@ -3,38 +3,51 @@
############################
## Optional. The port the server runs on. 8120 is default
## Env: PERIPHERY_PORT
# port = 8120
## Optional. /etc/monitor/repos is default.
## The directory periphery will use to manage repos.
## The periphery user must have write access to this directory.
## Env: PERIPHERY_REPO_DIR
# repo_dir = "/home/ubuntu/monitor/repos"
## Optional. /etc/monitor/stacks is default.
## The directory periphery will use to manage stacks.
## The periphery user must have write access to this directory.
## Env: PERIPHERY_STACK_DIR
# stack_dir = "/home/ubuntu/monitor/stacks"
## Optional. 5-sec is default. can use 1-sec, 5-sec, 10-sec, 30-sec, 1-min. controls granularity of system stats recorded
## Env: PERIPHERY_STATS_POLLING_RATE
# stats_polling_rate = "1-sec"
## Whether stack actions should use `docker-compose ...`
## instead of `docker compose ...`.
## default: false
## Env: PERIPHERY_LEGACY_COMPOSE_CLI
# legacy_compose_cli = true
## Optional. Only include mounts which start with a specific path in the disk report.
## Env: PERIPHERY_INCLUDE_DISK_MOUNTS
# include_disk_mounts = ["/etc/monitor/repos"]
########
# AUTH #
########
## Optional. Limit the ip addresses which can call the periphery api.
## Default is empty, which will not block any request by ip.
## Env: PERIPHERY_ALLOWED_IPS
# allowed_ips = ["127.0.0.1"]
## Optional. Require callers to provide one of the configured passkeys to access the periphery api.
## Default is empty, which will not require any passkey to be passed by core.
## Env: PERIPHERY_PASSKEYS
# passkeys = ["abcdefghijk"]
###########
# SECRETS #
###########
@@ -95,17 +108,21 @@
## Specify the log level of the monitor core application
## Default: info
## Options: off, error, warn, info, debug, trace
## Env: PERIPHERY_LOGGING_LEVEL
# logging.level = "debug"
## Specify the logging format for stdout / stderr.
## Default: standard
## Options: standard, json, none
## Env: PERIPHERY_LOGGING_STDIO
# logging.stdio = "json"
## Specify a opentelemetry otlp endpoint to send traces to
## Optional, default unassigned
## Env: PERIPHERY_LOGGING_OTLP_ENDPOINT
# logging.otlp_endpoint = "http://localhost:4317"
## Set the opentelemetry service name attached to the telemetry this periphery will send.
## Default: "Monitor"
## Env: PERIPHERY_LOGGING_OPENTELEMETRY_SERVICE_NAME
# logging.opentelemetry_service_name = "Periphery-02"

View File

@@ -2,20 +2,24 @@
To run Monitor Core, you will need Docker. See [the docker install docs](https://docs.docker.com/engine/install/).
:::info
Monitor Core itself can really only run remote builds.
You also have to [**install the Monitor Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers**
in order to alert / deploy etc.
You can currently and always will be able to **connect as many servers as you like** using the Periphery agent.
:::
### Deploy Monitor Core with Docker Compose
There is an example compose file here: [https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml](https://github.com/mbecker20/monitor/blob/main/config_example/core.compose.yaml).
Copy the contents to a `compose.yaml`, and deploy it with `docker compose up -d`.
:::info
Monitor Core itself can really only run remote builds.
You also have to [**install the Monitor Periphery agent**](/docs/connecting-servers) on your hosts and connect them as **Servers**
in order to alert / deploy etc.
If you only need to connect on one server (the one you are deploying Monitor Core on), you can do it all dockerized,
and use the [all-in-one compose file](https://github.com/mbecker20/monitor/blob/main/config_example/aio.compose.yaml).
This will deploy Monitor Core and Periphery, and automatically add the local periphery as a connected server.
You can currently and always will be able to **connect as many servers as you like** using the Periphery agent.
:::
### Configuration
You can configure Monitor with environment variables, or using a config file.

View File

@@ -0,0 +1,23 @@
# Docker Compose
Monitor supports docker compose through the `Stack` resource. Just create a new Stack, with any name, to get started.
## Define the file/s
While Monitor supports pasting in / managing the compose file in UI, the best way to deploy Stacks is using compose files located in a git repo.
If you manage your compose files in git repos:
- All your files, across all servers, are available locally to edit in your favorite text editor.
- All of your changes are tracked, and can be reverted.
- You can layer multiple compose files for greater composability, just like using `docker compose -f service_1.yaml -f service_2.yaml ...`
- You can use the git webhooks to do other automations when you change the compose file contents. Redeploying will be as easy as just `git push`.
:::info
Many Monitor resources need access to git repos. There is an in-built token management system (managed in UI or in config file) to give resources access to credentials.
All resources which depend on git repos are able to use these credentials to access private repos.
:::
## Define the Environment
## Deploy Stacks

35
docsite/docs/webhooks.md Normal file
View File

@@ -0,0 +1,35 @@
# Configuring Webhooks
Multiple Monitor resources can take advantage of webhooks from your git provider. Monitor supports incoming webhooks using the Github standard, which is also supported by other providers like Gitea.
:::note
On Gitea, the default "Gitea" webhook type works with the Github standard 👍
:::
## Copy the Resource Payload URL
Find the resource in UI, like a `Build`, `Repo`, or `Stack`.
Scroll down to the bottom of Configuration area, and copy the webhook for the action you want.
## Create the webhook on the Git Provider
Navigate to the repo page on your git provider, and go to the settings for the Repo.
Find Webhook settings, and click to create a new webhook.
You will have to input some information.
1. The `Payload URL` is the link that you copied in the step above, `Copy the Resource Payload URL`.
2. For Content-type, choose `application/json`
3. For Secret, input the secret you configured in the Monitor Core config (`MONITOR_WEBHOOK_SECRET`).
4. Enable SSL Verification, if you have proper TLS setup to your git provider (recommended).
5. For "events that trigger the webhook", just the push event is what most people want.
6. Of course, make sure the webhook is "Active" and hit create.
## When does it trigger?
Your git provider will now push this webhook to Monitor on *every* push to *any* branch. However, your `Build`, `Repo`,
etc. only cares about a specific branch of the repo.
Because of this, the webhook will trigger the action **only on pushes to the branch configured on the resource**.
For example, if I make a build, I may point the build to the `release` branch of a particular repo. If I set up a webhook, and push to the `main` branch, the action will *not trigger*. It will only trigger when the push is to the `release` branch.

View File

@@ -45,6 +45,7 @@ const sidebars: SidebarsConfig = {
],
},
"sync-resources",
"webhooks",
"permissioning",
"version-upgrades",
"api",

View File

@@ -1,5 +1,7 @@
# Periphery setup script
These scripts will set up Monitor Periphery on your hosts, managed by systemd.
*Note*. This script can be run multiple times without issue, and it won't change existing config after the first run. Just run it again after a Monitor version release, and it will update the periphery version.
*Note*. The script can usually detect aarch64 system and use the periphery-aarch64 binary.