Compare commits

...

55 Commits

Author SHA1 Message Date
mbecker20
5579ba869c v0.2.7 remove passkeys from periphery startup log 2023-03-03 17:27:42 +00:00
mbecker20
210940038c hide passkeys on periphery startup config log 2023-03-03 17:24:28 +00:00
mbecker20
98a1a60362 /home/ubuntu/example 2023-03-03 08:15:17 +00:00
mbecker20
86cf9116ba update builds and deployments docs with link to file paths doc 2023-03-03 08:09:41 +00:00
mbecker20
8b2defe0d9 add doc about file paths 2023-03-03 07:58:09 +00:00
mbecker20
50b14b3ce5 0.2.6 store ami name instead of ami_id (because the id has to change sometimes) 2023-03-03 07:11:55 +00:00
mbecker20
1bfb17cb5d handle setting default ami id correctly 2023-03-02 21:32:22 +00:00
mbecker20
b90acb66c7 0.2.5 stop leaking github token 2023-03-02 21:25:46 +00:00
mbecker20
7648b0dd10 don't let github access token leak when clone fails 2023-03-02 21:21:07 +00:00
mbecker20
2d69f1791a default builds to use aws config on create 2023-03-02 17:11:23 +00:00
mbecker20
5ba887095a allow select "none" for docker organization 2023-03-02 17:00:40 +00:00
mbecker20
19b7405562 show organization immediately if it exists on build 2023-03-01 23:20:54 +00:00
mbecker20
f5c5f734e1 clean deployment / build config before update 2023-03-01 21:18:40 +00:00
mbecker20
8d1639bcaf fix build permissions 2023-03-01 10:18:58 +00:00
mbecker20
e2446af00e remove print 2023-03-01 10:12:26 +00:00
mbecker20
1b39aaaa38 implement description 2023-03-01 09:46:50 +00:00
mbecker20
5a2a1a3d98 0.2.4: add description and update description 2023-03-01 08:13:07 +00:00
mbecker20
39eceb745b v0.2.2: configure docker organizations for builds 2023-03-01 07:18:49 +00:00
beckerinj
4c1ec5db33 edit permissions.md 2023-02-28 02:26:57 -05:00
beckerinj
8b68b9481e permissions.md 2023-02-28 02:21:46 -05:00
beckerinj
14843f83c6 add core setup link to table of contents 2023-02-28 01:56:05 -05:00
beckerinj
e67d87e885 even 2023-02-28 01:53:54 -05:00
beckerinj
7d4d865d58 elaborate on networks 2023-02-28 01:53:04 -05:00
beckerinj
1e4aaff23c if to is 2023-02-28 01:37:43 -05:00
beckerinj
df3f4a5f4a improve builds.md 2023-02-28 01:36:35 -05:00
beckerinj
1f8557300d fix type 2023-02-28 01:33:55 -05:00
beckerinj
bf17d705f0 fix typo 2023-02-28 01:33:09 -05:00
beckerinj
0d24b792c6 container lifetime management 2023-02-28 01:30:43 -05:00
mbecker20
fb61e36417 remove download log button, its kind of unsafe if the log is long 2023-02-28 06:18:42 +00:00
beckerinj
c39869d2f8 deployments.md 2023-02-28 01:18:11 -05:00
mbecker20
750e0274da #example 2023-02-28 05:54:43 +00:00
beckerinj
a9d37ab667 add placeholders to show to to pass env 2023-02-28 00:52:09 -05:00
mbecker20
eacb549d5e update core config example with github_webhook_base_url 2023-02-28 05:08:05 +00:00
mbecker20
ce7cb8fe45 improve confirm menu with copy button 2023-02-28 04:58:39 +00:00
mbecker20
f9fe4e32b4 restyle builds and deployments 2023-02-28 04:24:15 +00:00
mbecker20
2c9fc2bad4 always show docker account 2023-02-28 03:57:35 +00:00
mbecker20
94949291c2 fix notifications, add dynamic listener url 2023-02-28 03:41:25 +00:00
beckerinj
2944ba6ef9 cli v0.2.3 2023-02-27 22:18:04 -05:00
beckerinj
997e68a31d dynamic github webhook base url 2023-02-27 22:17:37 -05:00
beckerinj
bfb9d9e34d add periphery version in builder connected logs 2023-02-27 21:46:43 -05:00
mbecker20
3b9219b586 fix updates selector style 2023-02-27 05:55:33 +00:00
mbecker20
7bf2a88ab1 finish build args section 2023-02-27 05:53:56 +00:00
mbecker20
d21ed093dc fix build args gap 2023-02-27 05:52:13 +00:00
mbecker20
6e89671e91 switch cli build and build args build config 2023-02-27 05:47:10 +00:00
beckerinj
ee1128a666 Update builds.md 2023-02-27 00:45:16 -05:00
beckerinj
63b5deecd7 Update servers.md 2023-02-27 00:44:01 -05:00
mbecker20
f4f97ce1a7 finish builds / servers 2023-02-27 05:42:22 +00:00
mbecker20
a666df099f use image on deployment container 2023-02-26 06:55:31 +00:00
mbecker20
21dd0ee072 cli should be 0.2.2 2023-02-26 06:48:38 +00:00
mbecker20
bd2a1d4236 v0.2.1 merge multiple config files 2023-02-26 06:25:26 +00:00
mbecker20
7acdbcfd8f improve updates selector - add class 2023-02-25 22:34:37 +00:00
mbecker20
58514c5c93 fix height of builder config when no builder type chosen 2023-02-25 22:07:05 +00:00
mbecker20
580e800923 fix clap args with - 2023-02-23 23:14:02 +00:00
mbecker20
29f6b19f33 cli 0.2.0. fix starting mongo when no existing container present 2023-02-23 22:46:17 +00:00
mbecker20
e090247723 fix error when user doesn't have access to build on deployment 2023-02-23 08:00:55 +00:00
100 changed files with 1840 additions and 1119 deletions

516
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_cli"
version = "0.1.23"
version = "0.2.7"
edition = "2021"
authors = ["MoghTech"]
description = "monitor cli | tools to setup monitor system"
@@ -13,6 +13,7 @@ path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
monitor_types = { path = "../lib/types" }
clap = "4.0"
async_timing_util = "0.1.14"
rand = "0.8"

View File

@@ -7,15 +7,13 @@ use std::{
str::FromStr,
};
use async_timing_util::Timelength;
use clap::ArgMatches;
use colored::Colorize;
use monitor_types::{CoreConfig, MongoConfig, PeripheryConfig, RestartMode, Timelength};
use rand::{distributions::Alphanumeric, Rng};
use run_command::run_command_pipe_to_terminal;
use serde::Serialize;
use crate::types::{CoreConfig, MongoConfig, PeripheryConfig, RestartMode};
const CORE_IMAGE_NAME: &str = "mbecker2020/monitor_core";
const PERIPHERY_IMAGE_NAME: &str = "mbecker2020/monitor_periphery";
const PERIPHERY_CRATE: &str = "monitor_periphery";
@@ -75,6 +73,7 @@ pub fn gen_core_config(sub_matches: &ArgMatches) {
github_oauth: Default::default(),
google_oauth: Default::default(),
aws: Default::default(),
docker_organizations: Default::default(),
mongo: MongoConfig {
uri: mongo_uri,
db_name: mongo_db_name,
@@ -82,6 +81,7 @@ pub fn gen_core_config(sub_matches: &ArgMatches) {
},
jwt_secret: generate_secret(40),
github_webhook_secret: generate_secret(30),
github_webhook_base_url: None,
passkey: generate_secret(30),
};
@@ -178,7 +178,10 @@ pub fn start_mongo(sub_matches: &ArgMatches) {
}
}
let command = format!("docker stop {name} && docker container rm {name} && docker run -d --name {name} -p {port}:27017 --network {network} -v {mount}:/data/db{env} --restart {restart} --log-opt max-size=15m --log-opt max-file=3 mongo --quiet");
let stop =
run_command_pipe_to_terminal(&format!("docker stop {name} && docker container rm {name}"));
let command = format!("docker run -d --name {name} -p {port}:27017 --network {network} -v {mount}:/data/db{env} --restart {restart} --log-opt max-size=15m --log-opt max-file=3 mongo --quiet");
let output = run_command_pipe_to_terminal(&command);
@@ -316,7 +319,9 @@ pub fn gen_periphery_config(sub_matches: &ArgMatches) {
.map(|p| p.as_str())
.unwrap_or("~/.monitor/repos")
.to_string()
.replace("~", env::var("HOME").unwrap().as_str());
.replace("~", env::var("HOME").unwrap().as_str())
.parse()
.expect("failed to parse --repo_dir as path");
let config = PeripheryConfig {
port,

View File

@@ -3,7 +3,6 @@
use clap::{arg, Arg, Command};
mod helpers;
mod types;
use helpers::*;
@@ -36,19 +35,19 @@ fn cli() -> Command {
.required(false)
)
.arg(
arg!(--mongo-uri <URI> "sets the mongo uri to use. default is 'mongodb://monitor-mongo'")
arg!(--"mongo-uri" <URI> "sets the mongo uri to use. default is 'mongodb://monitor-mongo'")
.required(false)
)
.arg(
arg!(--mongo-db-name <NAME> "sets the db name to use. default is 'monitor'")
arg!(--"mongo-db-name" <NAME> "sets the db name to use. default is 'monitor'")
.required(false)
)
.arg(
arg!(--jwt-valid-for <TIMELENGTH> "sets the length of time jwt stays valid for. default is 1-wk (one week)")
arg!(--"jwt-valid-for" <TIMELENGTH> "sets the length of time jwt stays valid for. default is 1-wk (one week)")
.required(false)
)
.arg(
arg!(--slack-url <URL> "sets the slack url to use for slack notifications")
arg!(--"slack-url" <URL> "sets the slack url to use for slack notifications")
.required(false)
),
)
@@ -96,7 +95,7 @@ fn cli() -> Command {
arg!(--name <NAME> "specify the name of the monitor core container. default is monitor-core")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/core.config.toml")
arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/core.config.toml")
.required(false)
)
.arg(
@@ -111,7 +110,7 @@ fn cli() -> Command {
arg!(--restart <RESTART> "sets docker restart mode of monitor core container. default is unless-stopped")
)
.arg(
arg!(--add-internal-host "adds the docker flag '--add-host=host.docker.internal:host-gateway'. default is true")
arg!(--"add-internal-host" "adds the docker flag '--add-host=host.docker.internal:host-gateway'. default is true")
)
),
)
@@ -133,15 +132,15 @@ fn cli() -> Command {
.required(false)
)
.arg(
arg!(--stats-polling-rate <INTERVAL> "sets stats polling rate to control granularity of system stats returned. default is 5-sec. options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min")
arg!(--"stats-polling-rate" <INTERVAL> "sets stats polling rate to control granularity of system stats returned. default is 5-sec. options: 1-sec, 5-sec, 10-sec, 30-sec, 1-min")
.required(false)
)
.arg(
arg!(--allowed-ips <IPS> "used to only accept requests from known ips. give ips as comma seperated list, like '--allowed_ips 127.0.0.1,10.20.30.43'. default is empty, which will not block any ip.")
arg!(--"allowed-ips" <IPS> "used to only accept requests from known ips. give ips as comma seperated list, like '--allowed_ips 127.0.0.1,10.20.30.43'. default is empty, which will not block any ip.")
.required(false)
)
.arg(
arg!(--repo-dir <PATH> "if running in container, this should be '/repos'. default is ~/.monitor/repos").required(false)
arg!(--"repo-dir" <PATH> "if running in container, this should be '/repos'. default is ~/.monitor/repos").required(false)
)
)
.subcommand(
@@ -157,7 +156,7 @@ fn cli() -> Command {
arg!(--install "specify this to install periphery from crates.io")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
.required(false)
)
)
@@ -171,7 +170,7 @@ fn cli() -> Command {
arg!(--install "specify this to install periphery from crates.io")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
.required(false)
)
.arg(
@@ -183,32 +182,32 @@ fn cli() -> Command {
.required(false)
)
)
.subcommand(
Command::new("container")
.about("start up monitor periphery in docker container")
.arg(
arg!(--yes "used in scripts to skip 'enter to continue' step")
)
.arg(
arg!(--name <NAME> "specify the name of the monitor periphery container. default is monitor-periphery")
)
.arg(
arg!(--config-path <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
.required(false)
)
.arg(arg!(--repo-dir <PATH> "specify the folder on host to clone repos into. default is ~/.monitor/repos").required(false))
.arg(
arg!(--port <PORT> "sets port monitor periphery will run on. default is 8000")
.required(false)
)
.arg(
arg!(--network <NETWORK> "sets docker network of monitor periphery container. default is bridge")
.required(false)
)
.arg(
arg!(--restart <RESTART> "sets docker restart mode of monitor periphery container. default is unless-stopped")
)
)
// .subcommand(
// Command::new("container")
// .about("start up monitor periphery in docker container")
// .arg(
// arg!(--yes "used in scripts to skip 'enter to continue' step")
// )
// .arg(
// arg!(--name <NAME> "specify the name of the monitor periphery container. default is monitor-periphery")
// )
// .arg(
// arg!(--"config-path" <PATH> "specify the file path to use for config. default is ~/.monitor/periphery.config.toml")
// .required(false)
// )
// .arg(arg!(--"repo-dir" <PATH> "specify the folder on host to clone repos into. default is ~/.monitor/repos").required(false))
// .arg(
// arg!(--port <PORT> "sets port monitor periphery will run on. default is 8000")
// .required(false)
// )
// .arg(
// arg!(--network <NETWORK> "sets docker network of monitor periphery container. default is bridge")
// .required(false)
// )
// .arg(
// arg!(--restart <RESTART> "sets docker restart mode of monitor periphery container. default is unless-stopped")
// )
// )
),
)
}
@@ -239,7 +238,7 @@ fn main() {
match periphery_start_command {
("systemd", sub_matches) => start_periphery_systemd(sub_matches),
("daemon", sub_matches) => start_periphery_daemon(sub_matches),
("container", sub_matches) => start_periphery_container(sub_matches),
// ("container", sub_matches) => start_periphery_container(sub_matches),
_ => println!("\n❌ invalid call, should be 'monitor periphery start <daemon, container> <flags>' ❌\n")
}
}

View File

@@ -1,200 +0,0 @@
use std::{collections::HashMap, net::IpAddr};
use async_timing_util::Timelength;
use serde_derive::{Deserialize, Serialize};
use strum_macros::{Display, EnumString};
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct CoreConfig {
// the host to use with oauth redirect url, whatever host the user hits to access monitor. eg 'https://monitor.mogh.tech'
pub host: String,
// port the core web server runs on
#[serde(default = "default_core_port")]
pub port: u16,
// daily utc offset in hours to run daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight.
#[serde(default)]
pub daily_offset_hours: u8,
// number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
#[serde(default)]
pub keep_stats_for_days: u64, // 0 means never prune
pub jwt_secret: String,
#[serde(default = "default_jwt_valid_for")]
pub jwt_valid_for: Timelength,
// interval at which to collect server stats and alert for out of bounds
pub monitoring_interval: Timelength,
// used to verify validity from github webhooks
pub github_webhook_secret: String,
// sent in auth header with req to periphery
pub passkey: String,
// integration with slack app
pub slack_url: Option<String>,
// enable login with local auth
pub local_auth: bool,
pub mongo: MongoConfig,
#[serde(default)]
pub github_oauth: OauthCredentials,
#[serde(default)]
pub google_oauth: OauthCredentials,
#[serde(default)]
pub aws: AwsBuilderConfig,
}
fn default_core_port() -> u16 {
9000
}
fn default_jwt_valid_for() -> Timelength {
Timelength::OneWeek
}
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct OauthCredentials {
#[serde(default)]
pub enabled: bool,
#[serde(default)]
pub id: String,
#[serde(default)]
pub secret: String,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MongoConfig {
pub uri: String,
#[serde(default = "default_core_mongo_app_name")]
pub app_name: String,
#[serde(default = "default_core_mongo_db_name")]
pub db_name: String,
}
fn default_core_mongo_app_name() -> String {
"monitor_core".to_string()
}
fn default_core_mongo_db_name() -> String {
"monitor".to_string()
}
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct AwsBuilderConfig {
#[serde(skip_serializing)]
pub access_key_id: String,
#[serde(skip_serializing)]
pub secret_access_key: String,
pub default_ami_id: String,
pub default_subnet_id: String,
pub default_key_pair_name: String,
#[serde(default)]
pub available_ami_accounts: AvailableAmiAccounts,
#[serde(default = "default_aws_region")]
pub default_region: String,
#[serde(default = "default_volume_gb")]
pub default_volume_gb: i32,
#[serde(default = "default_instance_type")]
pub default_instance_type: String,
#[serde(default)]
pub default_security_group_ids: Vec<String>,
#[serde(default)]
pub default_assign_public_ip: bool,
}
fn default_aws_region() -> String {
String::from("us-east-1")
}
fn default_volume_gb() -> i32 {
8
}
fn default_instance_type() -> String {
String::from("m5.2xlarge")
}
pub type AvailableAmiAccounts = HashMap<String, AmiAccounts>; // (ami_id, AmiAccounts)
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct AmiAccounts {
pub name: String,
#[serde(default)]
pub github: Vec<String>,
#[serde(default)]
pub docker: Vec<String>,
}
pub type GithubUsername = String;
pub type GithubToken = String;
pub type GithubAccounts = HashMap<GithubUsername, GithubToken>;
pub type DockerUsername = String;
pub type DockerToken = String;
pub type DockerAccounts = HashMap<DockerUsername, DockerToken>;
pub type SecretsMap = HashMap<String, String>;
#[derive(Serialize, Deserialize, Debug)]
pub struct PeripheryConfig {
#[serde(default = "default_periphery_port")]
pub port: u16,
#[serde(default = "default_repo_dir")]
pub repo_dir: String,
#[serde(default = "default_stats_refresh_interval")]
pub stats_polling_rate: Timelength,
#[serde(default)]
pub allowed_ips: Vec<IpAddr>,
#[serde(default)]
pub passkeys: Vec<String>,
#[serde(default)]
pub secrets: SecretsMap,
#[serde(default)]
pub github_accounts: GithubAccounts,
#[serde(default)]
pub docker_accounts: DockerAccounts,
}
fn default_periphery_port() -> u16 {
8000
}
fn default_repo_dir() -> String {
"/repos".to_string()
}
fn default_stats_refresh_interval() -> Timelength {
Timelength::FiveSeconds
}
#[derive(Serialize, Deserialize, Debug, Display, EnumString, PartialEq, Hash, Eq, Clone, Copy)]
pub enum RestartMode {
#[serde(rename = "no")]
#[strum(serialize = "no")]
NoRestart,
#[serde(rename = "on-failure")]
#[strum(serialize = "on-failure")]
OnFailure,
#[serde(rename = "always")]
#[strum(serialize = "always")]
Always,
#[serde(rename = "unless-stopped")]
#[strum(serialize = "unless-stopped")]
UnlessStopped,
}

View File

@@ -1,5 +1,5 @@
# this should be the url used to access monitor in browser, potentially behind DNS, eg https://monitor.mogh.tech or http://12.34.56.78:9000
host = "http://localhost:9000"
host = "https://monitor.mogh.tech"
# the port the core system will run on. if running core in docker container, leave as this port as 9000 and use port bind eg. -p 9001:9000
port = 9000
@@ -19,9 +19,12 @@ jwt_valid_for = "1-wk"
# webhook url given by slack app
slack_url = "your_slack_app_webhook_url"
# token that has to be given to github during webhook config as the Secret
# token that has to be given to github during webhook config as the secret
github_webhook_secret = "your_random_webhook_secret"
# optional. an alternate base url that is used to receive github webhook requests. if not provided, will use 'host' address as base
github_webhook_base_url = "https://monitor-github-webhook.mogh.tech"
# token used to authenticate core requests to periphery
passkey = "your_random_passkey"
@@ -31,6 +34,9 @@ monitoring_interval = "1-min"
# allow or deny user login with username / password
local_auth = true
# these will be given in the GUI to attach to builds. New build docker orgs will default to first org (or none if empty).
docker_organizations = ["your_docker_org1", "your_docker_org_2"]
[aws]
access_key_id = "your_aws_key_id"
secret_access_key = "your_aws_secret_key"

View File

@@ -1,6 +1,6 @@
[package]
name = "core"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -15,9 +15,9 @@ tokio = { version = "1.25", features = ["full"] }
tokio-tungstenite = { version = "0.18", features=["native-tls"] }
tokio-util = "0.7"
axum = { version = "0.6", features = ["ws", "json"] }
axum-extra = { version = "0.5", features = ["spa"] }
axum-extra = { version = "0.5.0", features = ["spa"] }
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.3", features = ["cors"] }
tower-http = { version = "0.4.0", features = ["cors"] }
slack = { package = "slack_client_rs", version = "0.0.8" }
mungos = "0.3.3"
serde = "1.0"
@@ -34,4 +34,6 @@ async_timing_util = "0.1.14"
futures-util = "0.3"
diff-struct = "0.5"
typeshare = "1.0.0"
hex = "0.4"
hex = "0.4"
aws-config = "0.54"
aws-sdk-ec2 = "0.24"

View File

@@ -2,19 +2,23 @@ use std::time::Duration;
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::{
all_logs_success,
aws::{self, create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance},
to_monitor_name,
};
use helpers::{all_logs_success, to_monitor_name};
use mungos::{doc, to_bson};
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Build, Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget, Version,
AwsBuilderBuildConfig, Build, Log, Operation, PermissionLevel, Update, UpdateStatus,
UpdateTarget, Version,
};
use crate::{auth::RequestUser, state::State};
use crate::{
auth::RequestUser,
cloud::aws::{
self, create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance,
},
helpers::empty_or_only_spaces,
state::State,
};
const BUILDER_POLL_RATE_SECS: u64 = 2;
const BUILDER_POLL_MAX_TRIES: usize = 30;
@@ -51,6 +55,12 @@ impl State {
let start_ts = monitor_timestamp();
let build = Build {
name: to_monitor_name(name),
docker_organization: self
.config
.docker_organizations
.get(0)
.map(|d| d.to_string()),
aws_config: Some(AwsBuilderBuildConfig::default()),
permissions: [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect(),
@@ -180,6 +190,17 @@ impl State {
new_build.created_at = current_build.created_at.clone();
new_build.updated_at = start_ts.clone();
// filter out any build args that contain empty strings
// these could only happen by accident
new_build.docker_build_args = new_build.docker_build_args.map(|mut args| {
args.build_args = args
.build_args
.into_iter()
.filter(|a| !empty_or_only_spaces(&a.variable) && !empty_or_only_spaces(&a.value))
.collect();
args
});
self.db
.builds
.update_one(&new_build.id, mungos::Update::Regular(new_build.clone()))
@@ -248,7 +269,7 @@ impl State {
async fn build_inner(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
let mut build = self
.get_build_check_permissions(build_id, user, PermissionLevel::Update)
.get_build_check_permissions(build_id, user, PermissionLevel::Execute)
.await?;
build.version.increment();
let mut update = Update {
@@ -319,7 +340,7 @@ impl State {
let clone_success = match self.periphery.clone_repo(&server.server, &build).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
true
all_logs_success(&update.logs)
}
Err(e) => {
update
@@ -422,10 +443,17 @@ impl State {
self.config.aws.secret_access_key.clone(),
)
.await;
let ami_id = aws_config
.ami_id
let ami_name = aws_config
.ami_name
.as_ref()
.unwrap_or(&self.config.aws.default_ami_id);
.unwrap_or(&self.config.aws.default_ami_name);
let ami_id = &self
.config
.aws
.available_ami_accounts
.get(ami_name)
.ok_or(anyhow!("no ami id associated with ami name {ami_name}"))?
.ami_id;
let instance_type = aws_config
.instance_type
.as_ref()
@@ -476,19 +504,19 @@ impl State {
let start_connect_ts = monitor_timestamp();
let mut res = Ok(String::new());
for _ in 0..BUILDER_POLL_MAX_TRIES {
let status = self.periphery.health_check(&instance.server).await;
if let Ok(_) = status {
let version = self.periphery.get_version(&instance.server).await;
if let Ok(version) = version {
let connect_log = Log {
stage: "build instance connected".to_string(),
success: true,
stdout: "established contact with periphery on builder".to_string(),
stdout: format!("established contact with periphery on builder\nperiphery version: v{version}"),
start_ts: start_connect_ts,
end_ts: monitor_timestamp(),
..Default::default()
};
return Ok((instance, Some(aws_client), vec![start_log, connect_log]));
}
res = status;
res = version;
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS)).await;
}
let _ = terminate_ec2_instance(&aws_client, &instance.instance_id).await;

View File

@@ -9,7 +9,7 @@ use types::{
use crate::{
auth::RequestUser,
helpers::{any_option_diff_is_some, option_diff_is_some},
helpers::{any_option_diff_is_some, empty_or_only_spaces, get_image_name, option_diff_is_some},
state::State,
};
@@ -197,6 +197,33 @@ impl State {
new_deployment.created_at = current_deployment.created_at.clone();
new_deployment.updated_at = start_ts.clone();
// filter out any volumes, ports, env vars, extra args which are or contain empty strings
// these could only happen by accident
new_deployment.docker_run_args.volumes = new_deployment
.docker_run_args
.volumes
.into_iter()
.filter(|v| !empty_or_only_spaces(&v.local) && !empty_or_only_spaces(&v.container))
.collect();
new_deployment.docker_run_args.ports = new_deployment
.docker_run_args
.ports
.into_iter()
.filter(|p| !empty_or_only_spaces(&p.local) && !empty_or_only_spaces(&p.container))
.collect();
new_deployment.docker_run_args.environment = new_deployment
.docker_run_args
.environment
.into_iter()
.filter(|e| !empty_or_only_spaces(&e.variable) && !empty_or_only_spaces(&e.value))
.collect();
new_deployment.docker_run_args.extra_args = new_deployment
.docker_run_args
.extra_args
.into_iter()
.filter(|a| a.len() != 0)
.collect();
self.db
.deployments
.update_one(
@@ -343,14 +370,12 @@ impl State {
.await?;
let version = if let Some(build_id) = &deployment.build_id {
let build = self.db.get_build(build_id).await?;
let image = if let Some(docker_account) = &build.docker_account {
if deployment.docker_run_args.docker_account.is_none() {
let image = get_image_name(&build);
if deployment.docker_run_args.docker_account.is_none() {
if let Some(docker_account) = &build.docker_account {
deployment.docker_run_args.docker_account = Some(docker_account.to_string())
}
format!("{docker_account}/{}", to_monitor_name(&build.name))
} else {
to_monitor_name(&build.name)
};
};
}
let version = if let Some(version) = &deployment.build_version {
version.clone()
} else {

View File

@@ -218,6 +218,12 @@ pub fn router() -> Router {
})
}),
)
.route(
"/docker_organizations",
get(|Extension(state): StateExtension| async move {
Json(state.config.docker_organizations.clone())
}),
)
}
impl State {

View File

@@ -1,20 +1,21 @@
use anyhow::Context;
use anyhow::{anyhow, Context};
use axum::{
body::Body,
extract::Path,
http::{Request, StatusCode},
middleware,
routing::get,
routing::{get, post},
Extension, Json, Router,
};
use futures_util::Future;
use helpers::handle_anyhow_error;
use mungos::Deserialize;
use types::User;
use mungos::{doc, Deserialize};
use types::{PermissionLevel, UpdateTarget, User};
use typeshare::typeshare;
use crate::{
auth::{auth_request, JwtExtension, RequestUserExtension},
state::StateExtension,
auth::{auth_request, JwtExtension, RequestUser, RequestUserExtension},
state::{State, StateExtension},
};
pub mod build;
@@ -27,6 +28,13 @@ pub mod secret;
pub mod server;
pub mod update;
#[typeshare]
#[derive(Deserialize)]
struct UpdateDescriptionBody {
target: UpdateTarget,
description: String,
}
pub fn router() -> Router {
Router::new()
.route(
@@ -45,6 +53,30 @@ pub fn router() -> Router {
.map_err(handle_anyhow_error)
}),
)
.route(
"/github_webhook_base_url",
get(|state: StateExtension| async move {
state
.config
.github_webhook_base_url
.as_ref()
.unwrap_or(&state.config.host)
.to_string()
}),
)
.route(
"/update_description",
post(
|state: StateExtension,
user: RequestUserExtension,
body: Json<UpdateDescriptionBody>| async move {
state
.update_description(&body.target, &body.description, &user)
.await
.map_err(handle_anyhow_error)
},
),
)
.route("/users", get(get_users))
.nest("/build", build::router())
.nest("/deployment", deployment::router())
@@ -117,3 +149,57 @@ where
.map_err(handle_anyhow_error)?;
Ok(res)
}
impl State {
pub async fn update_description(
&self,
target: &UpdateTarget,
description: &str,
user: &RequestUser,
) -> anyhow::Result<()> {
match target {
UpdateTarget::Build(id) => {
self.get_build_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.builds
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Deployment(id) => {
self.get_deployment_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.deployments
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Server(id) => {
self.get_server_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.servers
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Group(id) => {
self.get_group_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.groups
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
UpdateTarget::Procedure(id) => {
self.get_procedure_check_permissions(id, user, PermissionLevel::Update)
.await?;
self.db
.procedures
.update_one::<()>(id, mungos::Update::Set(doc! { "description": description }))
.await?;
}
_ => return Err(anyhow!("invalid target: {target:?}")),
}
Ok(())
}
}

View File

@@ -379,4 +379,4 @@ async fn modify_user_create_build_permissions(
};
update.id = state.add_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -1,9 +1,7 @@
use std::str::FromStr;
use anyhow::{anyhow, Context};
use axum::{extract::Query, routing::get, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, to_bson, ObjectId};
use mungos::{doc, to_bson};
use serde_json::Value;
use types::{PermissionLevel, Update, UpdateTarget};
@@ -159,7 +157,7 @@ impl State {
.await
.context("failed at query to get users builds")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let deployment_ids = self
.db
@@ -168,7 +166,7 @@ impl State {
.await
.context("failed at query to get users deployments")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let server_ids = self
.db
@@ -177,7 +175,7 @@ impl State {
.await
.context("failed at query to get users servers")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let procedure_ids = self
.db
@@ -186,7 +184,7 @@ impl State {
.await
.context("failed at query to get users procedures")?
.into_iter()
.map(|e| ObjectId::from_str(&e.id).unwrap())
.map(|e| e.id)
.collect::<Vec<_>>();
let filter = doc! {
"$or": [

1
core/src/cloud/mod.rs Normal file
View File

@@ -0,0 +1 @@
pub mod aws;

View File

@@ -2,6 +2,8 @@ use std::str::FromStr;
use anyhow::anyhow;
use diff::{Diff, OptionDiff};
use helpers::to_monitor_name;
use types::Build;
#[macro_export]
macro_rules! response {
@@ -41,3 +43,26 @@ pub fn parse_comma_seperated_list<T: FromStr>(comma_sep_list: &str) -> anyhow::R
})
.collect()
}
pub fn get_image_name(build: &Build) -> String {
let name = to_monitor_name(&build.name);
match &build.docker_organization {
Some(org) => format!("{org}/{name}"),
None => match &build.docker_account {
Some(acct) => format!("{acct}/{name}"),
None => name,
},
}
}
pub fn empty_or_only_spaces(word: &str) -> bool {
if word.len() == 0 {
return true;
}
for char in word.chars() {
if char != ' ' {
return false;
}
}
return true;
}

View File

@@ -9,6 +9,7 @@ use tower_http::cors::{Any, CorsLayer};
mod actions;
mod api;
mod auth;
mod cloud;
mod config;
mod helpers;
mod monitoring;

View File

@@ -78,7 +78,6 @@ impl State {
}
let futures = servers.unwrap().into_iter().map(|server| async move {
let _ = self.periphery.image_prune(&server).await;
let _ = self.periphery.container_prune(&server).await;
});
join_all(futures).await;
}

View File

@@ -1,22 +1,50 @@
# building images
Monitor builds docker images by cloning the source repository from Github and running ```docker build``` on the configured Dockerfile, which should be present in the source repository.
Monitor builds docker images by cloning the source repository from Github, running ```docker build```, and pushing the resulting image to docker hub. Any repo containing a 'Dockerfile' is buildable using this method.
Build configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](https://github.com/mbecker20/monitor/blob/main/docs/paths.md).
## repo configuration
Settings related to the github repo are under the *repo* tab on the respective build's page.
To specify the github repo to build, just give it the name of the repo and the branch under *github config*. The name is given like ```mbecker20/monitor```, it includes the username / organization that owns the repo.
To specify the github repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like ```mbecker20/monitor```, it includes the username / organization that owns the repo.
Many repos are private, in this case a Github access token is required in the periphery.config.toml of the building server. These are specified in the config like ```username = "access_token"```. An account which has access to the repo and is available on the periphery server can be selected to use via the *github account* dropdown menu.
Sometimes a command needs to be run when the repo is cloned, you can configure this in the *on clone* section.
## docker build configuration
There are two fields to pass for *on clone*. the first is *path*, which changes to working directory. To run the command in the root of the repo, just pass ".". The second field is *command*, this is the shell command to be executed after the repo is cloned.
In order to docker build, monitor just needs to know the build directory and the path of the Dockerfile, you can configure these in the *build config* section.
For example, say your repo had a folder in it called "scripts" with a shell script "on-clone.sh". You would give *path* as "scripts" and command as "sh on-clone.sh". Or you could make *path* just "." and then command would be "sh scripts/on-clone.sh". Either way works fine.
If the build directory is the root of the repository, you pass the build path as ```.```. If the build directory is some folder of the repo, just pass the name of the folder. Do not pass the preceding "/". For example ```build/directory```
## build configuration
The dockerfile's path is given relative to the build directory. So if your build directory is ```build/directory``` and the dockerfile is in ```build/directory/Dockerfile.example```, you give the dockerfile path simply as ```Dockerfile.example```.
Just as with private repos, you will need to select a docker account to use with ```docker push```.
## running a pre build command
Sometimes a command needs to be run before running ```docker build```, you can configure this in the *pre build* section.
There are two fields to pass for *pre build*. the first is *path*, which changes the working directory. To run the command in the root of the repo, just pass ```.```. The second field is *command*, this is the shell command to be executed after the repo is cloned.
For example, say your repo had a folder in it called ```scripts``` with a shell script ```on-clone.sh```. You would give *path* as ```scripts``` and command as ```sh on-clone.sh```. Or you could make *path* just ```.``` and then the command would be ```sh scripts/on-clone.sh```. Either way works fine.
## adding build args
The Dockerfile may make use of [build args](https://docs.docker.com/engine/reference/builder/#arg). Build args can be passed using the gui by pressing the ```edit``` button. They are passed in the menu just as they would be in a .env file:
```
BUILD_ARG1=some_value
BUILD_ARG2=some_other_value
```
## builder configuration
A builder is a machine running monitor periphery and docker. Any server connected to monitor can be chosen as the builder for a build.
Building on a machine running production software is usually not a great idea, as this process can use a lot of the system resources. It is better to start up a temporary cloud machine dedicated for the build, then shut it down when the build is finished. Right now monitor supports AWS ec2 for this task.
### AWS builder
You can choose to build on AWS on the "builder" tab on the build's page. From here you can configure the AMI to use as a base to build the image. These must be configured in the monitor core configuration along with other information like defaults to use, AWS credentials, etc. This is explained on the [core setup page](https://github.com/mbecker20/monitor/blob/main/docs/setup.md).
## versioning
@@ -25,4 +53,4 @@ Monitor uses a major.minor.patch versioning scheme. Every build will auto increm
[next: deploying](https://github.com/mbecker20/monitor/blob/main/docs/deployments.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)

View File

@@ -1,3 +1,103 @@
## deploying applications
# deploying applications
Monitor can deploy any docker images that it can access with the configured docker accounts. It works by parsing the deployment configuration into a ```docker run``` command. The configuration is stored on MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well.
Deployment configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](https://github.com/mbecker20/monitor/blob/main/docs/paths.md).
## configuring the image
There are two options to configure the deployed image.
### attaching a monitor build
If the software you want to deploy is built by monitor, you can attach the build directly to the deployment.
By default, monitor will deploy the latest available version of the build, or you can specify a specific version using the version dropdown.
Also by default, monitor will use the same docker account that is attached to the build in order to pull the image on the periphery server. If that account is not available on the server, you can specify another available account to use instead, this account just needs to have read access to the docker repository.
### using a custom image
You can also manually specify an image name, like ```mongo``` or ```mbecker2020/random_image:0.1.1```.
If the image repository is private, you can select an available docker account to use to pull the image.
## configuring the network
One feature of docker is that it allows for the creation of [virtual networks between containers](https://docs.docker.com/network/). Monitor allows you to specify a docker virtual network to connect the container to, or to use the host system networking to bypass the docker virtual network.
The default selection is ```host```, which bypasses the docker virtual network layer.
If you do select a network other than host, you can specify port bindings with the GUI. For example, if you are running mongo (which defaults to port 27017), you could use the mapping:
```
27018 : 27017
```
In this case, you would access mongo from outside of the container on port ```27018```.
Note that this is not the only effect of using a network other than ```host```. For example, containers running on different networks can not communicate, and ones on the same network can not reach other containers on ```localhost``` even when they are running on the same system. This behavior can be a bit confusing if you are not familiar with it, and it can be bypassed entirely by just using the ```host``` network.
## configuring restart behavior
Docker, like systemd, has a couple options for handling when a container exits. See [docker restart policies](https://docs.docker.com/config/containers/start-containers-automatically/). Monitor allows you to select the appropriate restart behavior from these options.
## configuring environment variables
Monitor enables you to easily manage environment variables passed to the container. In the GUI, click the 'edit' button on the 'environment' card, this will bring up the environment menu.
You pass environment variables just as you would with a ```.env``` file:
```
ENV_VAR_1=some_value
ENV_VAR_2=some_other_value
```
## configuring volumes
A docker container's filesystem is segregated from that of the host. However, it is still possible for a container to access system files and directories, this is accomplished by using [bind mounts](https://docs.docker.com/storage/bind-mounts/).
Say your container needs to read a config file located on the system at ```/home/ubuntu/config.toml```. You can specify the bind mount to be:
```
/home/ubuntu/config.toml : /config/config.toml
```
The first path is the one on the system, the second is the path in the container. Your application would then read the file at ```/config/config.toml``` in order to load its contents.
These can be configured easily with the GUI in the 'volumes' card. You can configure as many bind mounts as you need.
## extra args
Not all features of docker are mapped directly by monitor, only the most common. You can still specify any custom flags for monitor to include in the ```docker run``` command by utilizing 'extra args'. For example, you can enable log rotation using these two extra args:
```
--log-opt max-size=10M
```
```
--log-opt max-file=3
```
## post image
Sometimes you need to specify some flags to be passed directly to the application. What is put here is inserted into the docker run command after the image. For example, to pass the ```--quiet``` flag to MongoDB, the docker run command would be:
```
docker run -d --name mongo-db mongo:6.0.3 --quiet
```
In order to achieve this with monitor, just pass ```--quiet``` to 'post image'.
## container lifetime management
The lifetime of a docker container is more like a virtual machine. They can be created, started, stopped, and destroyed. The lifetime management actions monitor presents to the user is relative to the containers state. For example, when the container is ```running```, you can either stop it, destroy it, or redeploy it.
### stopping a container
Sometimes you want to stop a running application but preserve its logs and configuration, either to be restarted later or to view the logs at a later time. It is more like *pausing* the application with its current config, as no configuration (like environment variable, volume mounts, etc.) will be changed when the container is started again. In order to restart an application with updated configuration, it must be *redeployed*.
### container redeploy
redeploying is the action of destroying a container and recreating it. If you update deployment config, these changes will not take effect until the container is redeployed. Just note this will destroy the previous containers logs along with the container itself.
[next: permissions](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)

38
docs/paths.md Normal file
View File

@@ -0,0 +1,38 @@
# File Paths
When working with monitor, you might have to configure file or directory paths.
## Relative Paths
Where possible, it is better to use relative file paths. Using relative file paths removes the connection between the process being run and the particular server it runs on, making it easier to move things between servers.
Where you see relative paths:
- setting the build directory and path of the Dockerfile
- setting a pre build command path
- configuring a frontend mount (used for web apps)
For all of the above, the path can be given relative to the root of the configured repo
The one exception is the Dockerfile path, which is given relative to the build directory (This is done by Docker itself, and this pattern matches usage of the Docker CLI).
There are 3 kinds of paths to pass:
1. to specify the root of the repo, use ```.``` as the path
2. to specify a folder in the repo, pass it with **no** preceding ```/```. For example, ```example_folder``` or ```folder1/folder2```
3. to specify an absolute path on the servers filesystem, use a preceding slash, eg. ```/home/ubuntu/example```. This way should only be used if absolutely necessary.
### Implementation
relative file paths are joined with the path of the repo on the system using a Rust [PathBuf](https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push).
## Docker Volume Paths
These are passed directly to the Docker CLI using ```--volume /path/on/system:/path/in/container```. So for these, the same rules apply as when using Docker on the command line. Paths here should be given as absolute, don't use ```~``` or even ```$HOME```.

View File

@@ -1 +1,32 @@
# permissioning resources
All monitor resources (servers, builds, deployments) have independent permission tables to allow for users to have granular access to these resources. By default, users do not see any resources until they are given at least read permissions.
## permission levels
There are 4 levels of permissions a user can have on a resource:
1. **None**. This is the lowest permission level, and means the user will not have any access to this resource. They will not see it in the GUI, and it will not show up if the user queries the core API directly. All attempts to view or update the resource will be blocked.
2. **Read**. This is the first permission level that grants any access. It will enable the user to see the resource in the GUI, read the configuration, and see any logs. Any attempts to update configuration or trigger any action will be blocked.
3. **Execute**. This level will allow the user to execute actions on the resource, like send a build command or trigger a redeploy. The user will still be blocked from updating configuration on the resource.
4. **Update**. The user has full access to the resource, they can execute any actions, update the configuration, and delete the resource.
## Administration
Users can be given admin privileges by accessing the monitor MongoDB and setting ```admin: true``` on the intended user document. These users have unrestricted access to all monitor resources, like servers, builds, and deployments. Additionally, only these users can update other (non-admin) user's permissions on resources, an action not available to regular users even with **Update** level permissions.
Monitor admins are responsible for managing user accounts as well. When a user logs into monitor for the first time, they will not immediately be granted access. An admin must first **enable** the user, which can be done from the 'manage users' page (found in the user dropdown menu in the topbar). Users can also be **disabled** by an admin at any time, which blocks all their access to the GUI and API.
Users also have some configurable global permissions, these are:
- create server permission
- create build permission
Only users with these permissions (as well as admins) can add additional servers to monitor, and can create additional builds, respectively.
[next: core setup](https://github.com/mbecker20/monitor/blob/main/docs/setup.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)

View File

@@ -11,20 +11,46 @@ The easiest way to do this is to follow the [monitor guide](https://github.com/m
### manual install steps
1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases) or install it using [cargo](https://crates.io/crates/monitor_periphery). If the monitor cli.
2. Create and edit ~/.monitor/periphery.config.toml, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). The file can be anywhere, it can be passed to periphery via the --config-path flag or with the CONFIG_PATH environment variable. The monitor cli can also be used: ```monitor periphery gen-config```
1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases).
2. Create and edit your config files, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). The monitor cli can be used to add the boilerplate: ```monitor periphery gen-config --path /path/to/config.toml```. The files can be anywhere, and can be passed to periphery via the ```--config-path``` flag.
3. Ensure that inbound connectivity is allowed on the port specified in periphery.config.toml (default 8000).
4. Install docker. Make sure whatever user periphery is run as has access to the docker group without sudo.
5. Start the periphery binary with your preferred process manager, like systemd. The config read from the file is printed on startup, ensure that it is as expected.
## example periphery start command
```
periphery \
--config-path /path/to/periphery.config.base.toml \
--config-path /other_path/to/periphery.config.overide.toml \
--merge-nested-config \
--home_dir /home/username
```
## passing config files
When you pass multiple config files, a later --config-path given in the command will always override previous ones.
There are two ways to merge config files. The default behavior is to completely replace any base fields with whatever fields are present in the override config. So if you pass ```allowed_ips = []``` in your override config, the final allowed_ips will be an empty list as well.
```--merge-nested-config``` will merge config fields recursively and extend config array fields.
For example, with ```--merge-nested-config``` you can specify an allowed ip in the base config, and another in the override config, and they will both be present in the final config.
Similarly, you can specify a base docker / github account pair, and extend them with additional accounts in the override config.
## adding the server to monitor
The easiest way to add the server is with the GUI. On the home page, click the + button to the right of the server search bar, configure the name and address of the server. The address is the full http/s url to the periphery server, eg http://12.34.56.78:8000.
Once it is added, you can access the GUI to modify some config, like the alerting thresholds for cpu, memory and disk usage. A server can also be temporarily disabled, this will prevent alerting if it goes offline.
Since no state is stored on the periphery servers, you can easily redirect all builds / deployments to be hosted on a different server. Just update the address to point to the new server.
Since no state is stored on the periphery servers, you can easily redirect all deployments to be hosted on a different server. Just update the address to point to the new server.
[next: building](https://github.com/mbecker20/monitor/blob/main/docs/builds.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)

View File

@@ -17,7 +17,7 @@
<body>
<noscript>You need to enable JavaScript to run this app.</noscript>
<div id="root" class="app"></div>
<div id="root" class="app-bounder"></div>
<script src="/src/index.tsx" type="module"></script>
</body>

View File

@@ -14,7 +14,7 @@ const Account = lazy(() => import("./components/Account"));
const App: Component = () => {
const { user } = useUser();
return (
<>
<div class="app">
<Topbar />
<Routes>
<Route path="/" component={Home} />
@@ -27,7 +27,7 @@ const App: Component = () => {
<Route path="/users" component={Users} />
</Show>
</Routes>
</>
</div>
);
};

View File

@@ -0,0 +1,128 @@
import { Component, createSignal, onMount, Show } from "solid-js";
import { client, pushNotification } from "..";
import { useAppState } from "../state/StateProvider";
import { UpdateTarget } from "../types";
import { useToggle } from "../util/hooks";
import ConfirmButton from "./shared/ConfirmButton";
import Flex from "./shared/layout/Flex";
import Grid from "./shared/layout/Grid";
import Loading from "./shared/loading/Loading";
import CenterMenu from "./shared/menu/CenterMenu";
import TextArea from "./shared/TextArea";
// Card showing a resource's description with an inline edit popup.
// Clicking the card opens a CenterMenu containing DescriptionMenu,
// where the description can be edited (if the user has permission).
const Description: Component<{
  name: string; // resource name, used in the popup title
  target: UpdateTarget; // which resource (Build / Deployment / Server) is described
  description?: string; // current description; undefined shows a placeholder
  userCanUpdate: boolean; // whether editing is allowed for this user
}> = (p) => {
  const [show, toggleShow] = useToggle();
  // Text rendered on the card: the description, or a placeholder when unset.
  const description = () => {
    if (p.description) {
      return p.description;
    } else {
      return "add a description";
    }
  };
  // Measure the card's rendered width once on mount and pin it, so the
  // ellipsis container keeps a fixed pixel width afterwards.
  const [width, setWidth] = createSignal<number>();
  onMount(() => {
    // NOTE(review): `ref` is assigned through the JSX ref below before this
    // runs; the `!?.` combination guards against it being unset — confirm.
    setWidth(ref!?.clientWidth);
  });
  let ref: HTMLDivElement;
  return (
    <CenterMenu
      show={show}
      toggleShow={toggleShow}
      title={`description | ${p.name}`}
      targetClass="card grey"
      targetStyle={{ width: "100%", "justify-content": "flex-start" }}
      target={
        <div
          ref={ref! as any}
          class="ellipsis"
          style={{
            opacity: 0.7,
            width: width() ? `${width()}px` : "100%",
            "box-sizing": "border-box",
            "text-align": "left"
          }}
        >
          {width() ? description() : ""}
        </div>
      }
      content={() => (
        <DescriptionMenu
          target={p.target}
          description={p.description}
          userCanUpdate={p.userCanUpdate}
          toggleShow={toggleShow}
        />
      )}
    />
  );
};
// Popup body for editing a resource description. Saves via the client API
// and optimistically patches the matching entry in local app state.
const DescriptionMenu: Component<{
  target: UpdateTarget; // resource being edited
  description?: string; // initial description value
  userCanUpdate: boolean; // gates both the save action and the textarea
  toggleShow: () => void; // closes the enclosing CenterMenu
}> = (p) => {
  const { builds, servers, deployments } = useAppState();
  let ref: HTMLTextAreaElement;
  // Focus the textarea as soon as the menu opens.
  onMount(() => {
    ref?.focus();
  });
  const [desc, setDesc] = createSignal(p.description);
  const [loading, setLoading] = createSignal(false);
  const update_description = () => {
    if (!p.userCanUpdate) return;
    setLoading(true);
    client
      .update_description({ target: p.target, description: desc() || "" })
      .then(() => {
        // On success, patch the cached resource so the UI reflects the new
        // description without a refetch. Note the cached shape differs per
        // type: builds store the fields flat, while deployments/servers wrap
        // them in a nested `deployment` / `server` object.
        if (p.target.type === "Build") {
          builds.update({ ...builds.get(p.target.id)!, description: desc() });
        } else if (p.target.type === "Deployment") {
          const deployment = deployments.get(p.target.id)!;
          deployments.update({
            ...deployment,
            deployment: { ...deployment.deployment, description: desc() },
          });
        } else if (p.target.type === "Server") {
          const server = servers.get(p.target.id)!;
          servers.update({
            ...server,
            server: { ...server.server, description: desc() },
          });
        }
        p.toggleShow();
      })
      .catch(() => {
        // Failure: notify and close; the local state is left untouched.
        pushNotification("bad", "failed to update description");
        p.toggleShow();
      });
  };
  return (
    <Grid placeItems="center">
      <TextArea
        ref={ref! as any}
        placeholder="add a description"
        value={desc()}
        onEdit={setDesc}
        onEnter={update_description}
        style={{ width: "700px", "max-width": "90vw", padding: "1rem" }}
        disabled={!p.userCanUpdate}
      />
      <Show when={p.userCanUpdate}>
        <Show when={!loading()} fallback={<Loading />}>
          <button class="green" onClick={update_description}>
            update
          </button>
        </Show>
      </Show>
    </Grid>
  );
};
export default Description;

View File

@@ -1,19 +1,24 @@
import { Component } from "solid-js";
import { Component, Show } from "solid-js";
import Grid from "./shared/layout/Grid";
import Loading from "./shared/loading/Loading";
const NotFound: Component<{ type: "deployment" | "server" | "build" }> = (p) => {
return (
<Grid
placeItems="center"
style={{ height: "100%", width: "100%" }}
>
<Grid placeItems="center" style={{ width: "fit-content", height: "fit-content" }}>
<h2>{p.type} at id not found</h2>
const NotFound: Component<{
type: "deployment" | "server" | "build";
loaded: boolean;
}> = (p) => {
return (
<Grid placeItems="center" style={{ height: "100%", width: "100%" }}>
<Grid
placeItems="center"
style={{ width: "fit-content", height: "fit-content" }}
>
<Show when={p.loaded} fallback={<h2>loading {p.type}...</h2>}>
<h2>{p.type} at id not found</h2>
</Show>
<Loading type="sonar" />
</Grid>
</Grid>
);
}
};
export default NotFound;
export default NotFound;

View File

@@ -2,7 +2,9 @@ import { useNavigate, useParams } from "@solidjs/router";
import { Component, createEffect, onCleanup, Show } from "solid-js";
import { useAppDimensions } from "../../state/DimensionProvider";
import { useAppState } from "../../state/StateProvider";
import { Operation } from "../../types";
import { useUser } from "../../state/UserProvider";
import { Operation, PermissionLevel } from "../../types";
import Description from "../Description";
import NotFound from "../NotFound";
import Grid from "../shared/layout/Grid";
import Actions from "./Actions";
@@ -12,6 +14,7 @@ import BuildTabs from "./tabs/Tabs";
import Updates from "./Updates";
const Build: Component<{}> = (p) => {
const { user, user_id } = useUser();
const { builds, ws } = useAppState();
const navigate = useNavigate();
const params = useParams();
@@ -31,8 +34,11 @@ const Build: Component<{}> = (p) => {
});
});
onCleanup(() => unsub);
const userCanUpdate = () =>
user().admin ||
build()?.permissions![user_id()] === PermissionLevel.Update;
return (
<Show when={build()} fallback={<NotFound type="build" />}>
<Show when={build()} fallback={<NotFound type="build" loaded={builds.loaded()} />}>
<ActionStateProvider build_id={params.id}>
<Grid
style={{
@@ -44,8 +50,14 @@ const Build: Component<{}> = (p) => {
style={{ width: "100%" }}
gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}
>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto 1fr" }}>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto auto 1fr" }}>
<Header />
<Description
target={{ type: "Build", id: params.id }}
name={build()?.name!}
description={build()?.description}
userCanUpdate={userCanUpdate()}
/>
<Actions />
</Grid>
<Show when={!isSemiMobile()}>

View File

@@ -55,7 +55,7 @@ const Header: Component<{}> = (p) => {
client.delete_build(params.id);
}}
class="red"
title={`delete build | ${build().name}`}
title="delete build"
match={build().name}
>
<Icon type="trash" />

View File

@@ -6,7 +6,7 @@ import SimpleTabs from "../../shared/tabs/SimpleTabs";
import { Tab } from "../../shared/tabs/Tabs";
import BuilderConfig from "./builder/BuilderConfig";
import BuildConfig from "./config/BuildConfig";
import Owners from "./Permissions";
import Permissions from "./Permissions";
import { ConfigProvider } from "./Provider";
const BuildTabs: Component<{}> = (p) => {
@@ -30,8 +30,8 @@ const BuildTabs: Component<{}> = (p) => {
element: () => <BuilderConfig />
},
user().admin && {
title: "collaborators",
element: () => <Owners />,
title: "permissions",
element: () => <Permissions />,
},
].filter((e) => e) as Tab[]
}

View File

@@ -22,21 +22,9 @@ const AwsBuilderConfig: Component<{}> = (p) => {
const Ami: Component = () => {
const { aws_builder_config } = useAppState();
const { build, setBuild, userCanUpdate } = useConfig();
const get_ami_id = () => {
if (build.aws_config?.ami_id) {
return build.aws_config.ami_id;
} else {
return aws_builder_config()?.default_ami_id || "unknown";
}
};
const get_ami_name = (ami_id: string) => {
if (aws_builder_config() === undefined || ami_id === "unknown")
return "unknown";
return (
aws_builder_config()!.available_ami_accounts![ami_id]?.name || "unknown"
);
};
const ami_ids = () => {
const default_ami_name = () => aws_builder_config()?.default_ami_name;
const get_ami_name = () => build.aws_config?.ami_name || aws_builder_config()?.default_ami_name || "unknown";
const ami_names = () => {
if (aws_builder_config() === undefined) return [];
return Object.keys(aws_builder_config()!.available_ami_accounts!);
};
@@ -49,10 +37,16 @@ const Ami: Component = () => {
<h1>ami</h1>
<Selector
targetClass="blue"
selected={get_ami_id()}
items={ami_ids()}
onSelect={(ami_id) => setBuild("aws_config", "ami_id", ami_id)}
itemMap={get_ami_name}
selected={get_ami_name()}
items={ami_names()}
onSelect={(ami_name) => {
if (ami_name === default_ami_name()) {
setBuild("aws_config", "ami_name", undefined);
} else {
setBuild("aws_config", "ami_name", ami_name);
}
}}
itemMap={(i) => i.replaceAll("_", " ")}
position="bottom right"
disabled={!userCanUpdate()}
useSearch

View File

@@ -23,6 +23,9 @@ const BuilderConfig: Component<{}> = (p) => {
<Show when={build.aws_config}>
<AwsBuilderConfig />
</Show>
<Show when={!build.server_id && !build.aws_config}>
<div style={{ height: "12rem" }} />
</Show>
</Grid>
<Show when={userCanUpdate() && build.updated}>
<Show

View File

@@ -18,7 +18,7 @@ const BuildArgs: Component<{}> = (p) => {
justifyContent="space-between"
>
<h1>build args</h1>
<Flex alignItems="center" gap="0.2rem">
<Flex alignItems="center">
<Show
when={
!build.docker_build_args?.build_args ||
@@ -71,6 +71,7 @@ const EditBuildArgs: Component<{}> = (p) => {
content={() => (
<TextArea
class="scroller"
placeholder="VARIABLE=value #example"
value={buildArgs()}
onEdit={setBuildArgs}
style={{

View File

@@ -10,7 +10,7 @@ import Loading from "../../../shared/loading/Loading";
import BuildArgs from "./BuildArgs";
import Version from "./Version";
import Repo from "./Repo";
import ListenerUrl from "./ListenerUrl";
import WebhookUrl from "./WebhookUrl";
const BuildConfig: Component<{}> = (p) => {
const { build, reset, save, userCanUpdate } = useConfig();
@@ -21,9 +21,11 @@ const BuildConfig: Component<{}> = (p) => {
<Version />
<Repo />
<Docker />
<BuildArgs />
<CliBuild />
<ListenerUrl />
<BuildArgs />
<Show when={userCanUpdate()}>
<WebhookUrl />
</Show>
</Grid>
<Show when={userCanUpdate() && build.updated}>
<Show

View File

@@ -1,4 +1,4 @@
import { Component, createEffect, createSignal, Show } from "solid-js";
import { Component, createEffect, createResource, createSignal, Show } from "solid-js";
import { client } from "../../../..";
import { useAppState } from "../../../../state/StateProvider";
import { ServerStatus } from "../../../../types";
@@ -12,6 +12,7 @@ import { useConfig } from "../Provider";
const Docker: Component<{}> = (p) => {
const { aws_builder_config } = useAppState();
const { build, setBuild, server, userCanUpdate } = useConfig();
const [dockerOrgs] = createResource(() => client.get_docker_organizations());
const [peripheryDockerAccounts, setPeripheryDockerAccounts] =
createSignal<string[]>();
createEffect(() => {
@@ -25,10 +26,10 @@ const Docker: Component<{}> = (p) => {
if (build.server_id) {
return peripheryDockerAccounts() || [];
} else if (build.aws_config) {
const ami_id =
build.aws_config?.ami_id || aws_builder_config()?.default_ami_id;
return ami_id
? aws_builder_config()?.available_ami_accounts![ami_id].docker || []
const ami_name =
build.aws_config?.ami_name || aws_builder_config()?.default_ami_name;
return ami_name
? aws_builder_config()?.available_ami_accounts![ami_name].docker || []
: [];
} else return [];
};
@@ -86,6 +87,28 @@ const Docker: Component<{}> = (p) => {
disabled={!userCanUpdate()}
/>
</Flex>
<Show when={build.docker_organization || (dockerOrgs() || []).length > 0}>
<Flex
justifyContent={userCanUpdate() ? "space-between" : undefined}
alignItems="center"
style={{ "flex-wrap": "wrap" }}
>
<h2>dockerhub organization: </h2>
<Selector
targetClass="blue"
selected={build.docker_organization || "none"}
items={["none", ...(dockerOrgs() || [])]}
onSelect={(account) => {
setBuild(
"docker_organization",
account === "none" ? undefined : account
);
}}
position="bottom right"
disabled={!userCanUpdate()}
/>
</Flex>
</Show>
</Grid>
);
};

View File

@@ -1,37 +0,0 @@
import { Component, Show } from "solid-js";
import { pushNotification, URL } from "../../../..";
import { copyToClipboard, getId } from "../../../../util/helpers";
import ConfirmButton from "../../../shared/ConfirmButton";
import Icon from "../../../shared/Icon";
import Flex from "../../../shared/layout/Flex";
import Grid from "../../../shared/layout/Grid";
import { useConfig } from "../Provider";
// Displays the build's github webhook listener URL with a copy-to-clipboard
// button. Only rendered for users with update permission on the build.
const ListenerUrl: Component<{}> = (p) => {
  const { build, userCanUpdate } = useConfig();
  // URL the github webhook should POST to for this build.
  const listenerUrl = () => `${URL}/api/listener/build/${getId(build)}`;
  return (
    <Show when={userCanUpdate()}>
      <Grid class="config-item shadow">
        <h1>webhook url</h1>
        <Flex justifyContent="space-between" alignItems="center">
          <div class="ellipsis" style={{ width: "250px" }}>
            {listenerUrl()}
          </div>
          <ConfirmButton
            class="blue"
            onFirstClick={() => {
              copyToClipboard(listenerUrl());
              pushNotification("good", "copied url to clipboard");
            }}
            confirm={<Icon type="check" />}
          >
            <Icon type="clipboard" />
          </ConfirmButton>
        </Flex>
      </Grid>
    </Show>
  );
}
export default ListenerUrl;

View File

@@ -25,10 +25,10 @@ const Repo: Component<{}> = (p) => {
if (build.server_id) {
return peripheryGithubAccounts() || [];
} else if (build.aws_config) {
const ami_id =
build.aws_config?.ami_id || aws_builder_config()?.default_ami_id;
return ami_id
? aws_builder_config()?.available_ami_accounts![ami_id].github || []
const ami_name =
build.aws_config?.ami_name || aws_builder_config()?.default_ami_name;
return ami_name
? aws_builder_config()?.available_ami_accounts![ami_name].github || []
: [];
} else return [];
};

View File

@@ -0,0 +1,39 @@
import { Component, createResource, Show } from "solid-js";
import { client } from "../../../..";
import { getId } from "../../../../util/helpers";
import CopyClipboard from "../../../shared/CopyClipboard";
import Flex from "../../../shared/layout/Flex";
import Grid from "../../../shared/layout/Grid";
import Loading from "../../../shared/loading/Loading";
import { useConfig } from "../Provider";
// Build config card: displays the GitHub webhook listener url for this build
// with a copy-to-clipboard button. The webhook base url is fetched from the
// core api rather than derived from the app's own origin.
const ListenerUrl: Component<{}> = (p) => {
const { build } = useConfig();
// async resource: accessor returns undefined until the request resolves
const [github_base_url] = createResource(() =>
client.get_github_webhook_base_url()
);
// undefined while the base url is still loading; otherwise the full
// listener endpoint keyed by this build's id
const listenerUrl = () => {
if (github_base_url()) {
return `${github_base_url()}/api/listener/build/${getId(build)}`;
}
};
return (
<Grid class="config-item shadow">
<h1>webhook url</h1>
<Flex
justifyContent="space-between"
alignItems="center"
style={{ "flex-wrap": "wrap" }}
>
{/* three-dot loader until the url can be constructed */}
<Show when={listenerUrl()} fallback={<Loading type="three-dot" />}>
<div class="ellipsis" style={{ width: "250px" }}>
{listenerUrl()}
</div>
</Show>
{/* copy button is always rendered; copies "" if url not yet loaded */}
<CopyClipboard copyText={listenerUrl() || ""} copying="url" />
</Flex>
</Grid>
);
};
export default ListenerUrl;

View File

@@ -196,7 +196,7 @@ const Deploy: Component<{ redeploy?: boolean }> = (p) => {
onConfirm={() => {
client.deploy_container(params.id);
}}
title={`redeploy container | ${name()}`}
title="redeploy container"
match={name()!}
>
<Icon type={"reset"} />
@@ -232,7 +232,7 @@ const RemoveContainer = () => {
onConfirm={() => {
client.remove_container(params.id);
}}
title={`destroy container | ${name()}`}
title="destroy container"
match={name()!}
>
<Icon type="trash" />
@@ -298,7 +298,7 @@ const Stop = () => {
onConfirm={() => {
client.stop_container(params.id);
}}
title={`stop container | ${name()}`}
title="stop container"
match={name()!}
>
<Icon type="pause" />

View File

@@ -1,9 +1,10 @@
import { useParams } from "@solidjs/router";
import { Component, onCleanup, Show } from "solid-js";
import { client } from "../..";
import { Component, Show } from "solid-js";
import { useAppDimensions } from "../../state/DimensionProvider";
import { useAppState } from "../../state/StateProvider";
import { ServerStatus } from "../../types";
import { useUser } from "../../state/UserProvider";
import { PermissionLevel } from "../../types";
import Description from "../Description";
import NotFound from "../NotFound";
import Grid from "../shared/layout/Grid";
import Actions from "./Actions";
@@ -16,12 +17,16 @@ const POLLING_RATE = 10000;
// let interval = -1;
const Deployment: Component<{}> = (p) => {
const { user, user_id } = useUser();
const { servers, deployments } = useAppState();
const { isSemiMobile } = useAppDimensions();
const params = useParams();
const deployment = () => deployments.get(params.id);
const server = () =>
deployment() && servers.get(deployment()!.deployment.server_id);
const userCanUpdate = () =>
user().admin ||
deployment()?.deployment.permissions![user_id()] === PermissionLevel.Update;
// clearInterval(interval);
// interval = setInterval(async () => {
// if (server()?.status === ServerStatus.Ok) {
@@ -33,7 +38,7 @@ const Deployment: Component<{}> = (p) => {
return (
<Show
when={deployment() && server()}
fallback={<NotFound type="deployment" />}
fallback={<NotFound type="deployment" loaded={deployments.loaded()} />}
>
<ActionStateProvider>
<Grid
@@ -46,8 +51,14 @@ const Deployment: Component<{}> = (p) => {
style={{ width: "100%" }}
gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}
>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto 1fr" }}>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto auto 1fr" }}>
<Header />
<Description
target={{ type: "Deployment", id: params.id }}
name={deployment()?.deployment.name!}
description={deployment()?.deployment.description}
userCanUpdate={userCanUpdate()}
/>
<Actions />
</Grid>
<Show when={!isSemiMobile()}>

View File

@@ -88,9 +88,7 @@ const Header: Component<{}> = (p) => {
client.delete_deployment(params.id);
}}
class="red"
title={`delete deployment | ${
deployment().deployment.name
}`}
title="delete deployment"
match={deployment().deployment.name}
info={
<Show when={deployment().container}>

View File

@@ -123,7 +123,7 @@ const DeploymentTabs: Component<{}> = () => {
},
],
user().admin && {
title: "collaborators",
title: "permissions",
element: () => <Permissions />,
},
]

View File

@@ -17,19 +17,14 @@ import { Tab } from "../../../shared/tabs/Tabs";
import RepoMount from "./mount-repo/RepoMount";
import { OnClone, OnPull } from "./mount-repo/OnGit";
import Loading from "../../../shared/loading/Loading";
import Permissions from "../Permissions";
import { pushNotification, URL } from "../../../..";
import { combineClasses, copyToClipboard, getId } from "../../../../util/helpers";
import { useAppDimensions } from "../../../../state/DimensionProvider";
import { useUser } from "../../../../state/UserProvider";
import SimpleTabs from "../../../shared/tabs/SimpleTabs";
import ExtraArgs from "./container/ExtraArgs";
import WebhookUrl from "./container/WebhookUrl";
const Config: Component<{}> = () => {
const { deployment, reset, save, userCanUpdate } = useConfig();
const { user } = useUser();
const { isMobile } = useAppDimensions();
const listenerUrl = () => `${URL}/api/listener/deployment/${getId(deployment)}`;
return (
<Show when={deployment.loaded}>
<Grid class="config">
@@ -44,9 +39,7 @@ const Config: Component<{}> = () => {
element: () => (
<Grid class="config-items scroller" placeItems="start center">
<Image />
<Show when={deployment.docker_run_args.image}>
<DockerAccount />
</Show>
<DockerAccount />
<Network />
<Restart />
<Env />
@@ -66,31 +59,7 @@ const Config: Component<{}> = () => {
<Grid class="config-items scroller" placeItems="start center">
<Git />
<Show when={userCanUpdate()}>
<Grid class={combineClasses("config-item shadow")}>
<h1>webhook url</h1>
<Flex
justifyContent="space-between"
alignItems="center"
style={{ "flex-wrap": "wrap" }}
>
<div class="ellipsis" style={{ width: "250px" }}>
{listenerUrl()}
</div>
<ConfirmButton
class="blue"
onFirstClick={() => {
copyToClipboard(listenerUrl());
pushNotification(
"good",
"copied url to clipboard"
);
}}
confirm={<Icon type="check" />}
>
<Icon type="clipboard" />
</ConfirmButton>
</Flex>
</Grid>
<WebhookUrl />
</Show>
<RepoMount />
<OnClone />

View File

@@ -1,4 +1,4 @@
import { Component, createEffect, createSignal, Show } from "solid-js";
import { Component, createEffect, createSignal } from "solid-js";
import { client } from "../../../../..";
import { ServerStatus } from "../../../../../types";
import { combineClasses } from "../../../../../util/helpers";
@@ -16,6 +16,16 @@ const DockerAccount: Component<{}> = (p) => {
.then(setDockerAccounts);
}
});
const when_none_selected = () => {
if (deployment.build_id) {
return "same as build"
} else {
return "none"
}
}
const accounts = () => {
return [when_none_selected(), ...(dockerAccounts() || [])];
}
return (
<Flex
class={combineClasses("config-item shadow")}
@@ -26,11 +36,11 @@ const DockerAccount: Component<{}> = (p) => {
<h1>docker account</h1>
<Selector
targetClass="blue"
items={["none", ...dockerAccounts()!]}
selected={deployment.docker_run_args.docker_account || "none"}
items={accounts()}
selected={deployment.docker_run_args.docker_account || when_none_selected()}
onSelect={(account) =>
setDeployment("docker_run_args", {
docker_account: account === "none" ? undefined : account,
docker_account: account === when_none_selected() ? undefined : account,
})
}
position="bottom right"

View File

@@ -71,6 +71,7 @@ const EditDotEnv: Component<{}> = (p) => {
content={() => (
<TextArea
class="scroller"
placeholder="VARIABLE=value #example"
value={dotenv()}
onEdit={setDotEnv}
style={{

View File

@@ -0,0 +1,41 @@
import { Component, createResource, Show } from "solid-js";
import { client } from "../../../../..";
import { getId } from "../../../../../util/helpers";
import CopyClipboard from "../../../../shared/CopyClipboard";
import Flex from "../../../../shared/layout/Flex";
import Grid from "../../../../shared/layout/Grid";
import Loading from "../../../../shared/loading/Loading";
import { useConfig } from "../Provider";
// Deployment config card: displays the GitHub webhook listener url for this
// deployment with a copy-to-clipboard button. The webhook base url is fetched
// from the core api rather than derived from the app's own origin.
const WebhookUrl: Component<{}> = (p) => {
const { deployment } = useConfig();
// async resource: accessor returns undefined until the request resolves
const [github_base_url] = createResource(() =>
client.get_github_webhook_base_url()
);
// undefined while the base url is still loading; otherwise the full
// listener endpoint keyed by this deployment's id
const listenerUrl = () => {
if (github_base_url()) {
return `${github_base_url()}/api/listener/deployment/${getId(
deployment
)}`;
}
};
return (
<Grid class="config-item shadow">
<h1>webhook url</h1>
<Flex
justifyContent="space-between"
alignItems="center"
style={{ "flex-wrap": "wrap" }}
>
{/* three-dot loader until the url can be constructed */}
<Show when={listenerUrl()} fallback={<Loading type="three-dot" />}>
<div class="ellipsis" style={{ "max-width": "250px" }}>
{listenerUrl()}
</div>
</Show>
{/* copy button is always rendered; copies "" if url not yet loaded */}
<CopyClipboard copyText={listenerUrl() || ""} copying="url" />
</Flex>
</Grid>
);
};
export default WebhookUrl;

View File

@@ -2,12 +2,11 @@ import { useParams } from "@solidjs/router";
import {
Component,
createEffect,
createMemo,
createSignal,
onCleanup,
Show,
} from "solid-js";
import { client, pushNotification } from "../../../..";
import { pushNotification } from "../../../..";
import { useAppState } from "../../../../state/StateProvider";
import { DockerContainerState, Log as LogType } from "../../../../types";
import { combineClasses } from "../../../../util/helpers";
@@ -100,7 +99,7 @@ const Log: Component<{
position="bottom right"
itemStyle={{ width: "4rem" }}
/>
<Show when={userCanUpdate()}>
{/* <Show when={userCanUpdate()}>
<button
class="blue"
onClick={() =>
@@ -114,7 +113,7 @@ const Log: Component<{
>
download full log
</button>
</Show>
</Show> */}
<button
class="blue"
onClick={async () => {

View File

@@ -21,6 +21,7 @@ import Selector from "../../shared/menu/Selector";
import { TreeSortType, TREE_SORTS, useTreeState } from "./Provider";
const Builds: Component<{}> = (p) => {
const { isSemiMobile } = useAppDimensions();
const { user } = useUser();
const { builds } = useAppState();
const { sort, setSort, build_sorter } = useTreeState();
@@ -69,13 +70,15 @@ const Builds: Component<{}> = (p) => {
<NewBuild />
</Show>
</Grid>
<For each={buildIDs()}>
{(id) => (
<ActionStateProvider build_id={id}>
<Build id={id} />
</ActionStateProvider>
)}
</For>
<Grid gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}>
<For each={buildIDs()}>
{(id) => (
<ActionStateProvider build_id={id}>
<Build id={id} />
</ActionStateProvider>
)}
</For>
</Grid>
</Grid>
);
};
@@ -83,10 +86,10 @@ const Builds: Component<{}> = (p) => {
const Build: Component<{ id: string }> = (p) => {
const { isMobile } = useAppDimensions();
const { user } = useUser();
const { builds, servers } = useAppState();
const { builds } = useAppState();
const build = () => builds.get(p.id)!;
const server = () =>
build().server_id ? servers.get(build().server_id!) : undefined;
// const server = () =>
// build().server_id ? servers.get(build().server_id!) : undefined;
const version = () => {
return `v${build().version.major}.${build().version.minor}.${
build().version.patch
@@ -108,11 +111,11 @@ const Build: Component<{ id: string }> = (p) => {
user().admin ||
build().permissions![getId(user())] === PermissionLevel.Execute ||
build().permissions![getId(user())] === PermissionLevel.Update;
const isAwsBuild = () => build().aws_config ? true : false;
// const isAwsBuild = () => build().aws_config ? true : false;
return (
<A
href={`/build/${p.id}`}
class="card light shadow"
class="card light shadow hoverable"
style={{
width: "100%",
height: "fit-content",
@@ -123,7 +126,7 @@ const Build: Component<{ id: string }> = (p) => {
>
<h1 style={{ "font-size": "1.25rem" }}>{build().name}</h1>
<Flex alignItems="center">
<Show when={server()}>
{/* <Show when={server()}>
<A
href={`/server/${build().server_id!}`}
style={{ padding: 0, opacity: 0.7 }}
@@ -133,7 +136,7 @@ const Build: Component<{ id: string }> = (p) => {
</Show>
<Show when={isAwsBuild()}>
<div style={{ opacity: 0.7 }}>aws build</div>
</Show>
</Show> */}
<h2>{version()}</h2>
<Show when={!isMobile()}>
<div style={{ opacity: 0.7 }}>{lastBuiltAt()}</div>

View File

@@ -75,7 +75,7 @@ const Header: Component<{}> = (p) => {
client.delete_server(params.id);
}}
class="red"
title={`delete server | ${server().server.name}`}
title="delete server"
match={server().server.name}
info={
<div style={{ opacity: 0.7 }}>

View File

@@ -2,6 +2,9 @@ import { useParams } from "@solidjs/router";
import { Component, Show } from "solid-js";
import { useAppDimensions } from "../../state/DimensionProvider";
import { useAppState } from "../../state/StateProvider";
import { useUser } from "../../state/UserProvider";
import { PermissionLevel } from "../../types";
import Description from "../Description";
import NotFound from "../NotFound";
import ServerChildren from "../server_children/ServerChildren";
import Grid from "../shared/layout/Grid";
@@ -12,15 +15,16 @@ import ServerTabs from "./tabs/Tabs";
import Updates from "./Updates";
const Server: Component<{}> = (p) => {
const { user, user_id } = useUser();
const { servers } = useAppState();
const params = useParams();
const server = () => servers.get(params.id)!;
const { isSemiMobile } = useAppDimensions();
// const userCanUpdate = () =>
// user().admin ||
// server()!.server.permissions![getId(user())] === PermissionLevel.Update;
const userCanUpdate = () =>
user().admin ||
server()?.server.permissions![user_id()] === PermissionLevel.Update;
return (
<Show when={server()} fallback={<NotFound type="server" />}>
<Show when={server()} fallback={<NotFound type="server" loaded={servers.loaded()} />}>
<ActionStateProvider>
<Grid
style={{
@@ -32,8 +36,14 @@ const Server: Component<{}> = (p) => {
style={{ width: "100%" }}
gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}
>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto 1fr" }}>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto auto 1fr" }}>
<Header />
<Description
target={{ type: "Server", id: params.id }}
name={server().server.name}
description={server().server.description}
userCanUpdate={userCanUpdate()}
/>
<Actions />
</Grid>
<Show when={!isSemiMobile()}>

View File

@@ -1,13 +1,16 @@
import { useParams } from "@solidjs/router";
import { Component, createResource, For, Show } from "solid-js";
import { client } from "../../..";
import { useAppDimensions } from "../../../state/DimensionProvider";
import { useAppState } from "../../../state/StateProvider";
import { readableStorageAmount } from "../../../util/helpers";
import Flex from "../../shared/layout/Flex";
import Grid from "../../shared/layout/Grid";
import Loading from "../../shared/loading/Loading";
import HoverMenu from "../../shared/menu/HoverMenu";
const Info: Component<{}> = (p) => {
const { isMobile } = useAppDimensions();
const { serverInfo } = useAppState();
const params = useParams();
const [stats] = createResource(() => client.get_server_stats(params.id, { disks: true }));
@@ -74,8 +77,17 @@ const Info: Component<{}> = (p) => {
justifyContent="space-between"
>
<Flex alignItems="center">
<div>mount point:</div>
<h2>{disk.mount}</h2>
<div style={{ "white-space": "nowrap" }}>
mount point:
</div>
<h2
class="ellipsis"
style={{
"max-width": isMobile() ? "50px" : "200px",
}}
>
{disk.mount}
</h2>
</Flex>
<Flex alignItems="center">
<div>{readableStorageAmount(disk.used_gb)} used</div>

View File

@@ -1,12 +1,10 @@
import { A } from "@solidjs/router";
import { Component, createResource, Show } from "solid-js";
import { client } from "../..";
import { Component, Show } from "solid-js";
import { useAppState } from "../../state/StateProvider";
import { DockerContainerState } from "../../types";
import {
combineClasses,
deploymentStateClass,
getId,
readableVersion,
} from "../../util/helpers";
import Circle from "../shared/Circle";
@@ -17,13 +15,11 @@ import s from "./serverchildren.module.scss";
const Deployment: Component<{ id: string }> = (p) => {
const { deployments, builds } = useAppState();
const deployment = () => deployments.get(p.id)!;
const [deployed_version] = createResource(() =>
client.get_deployment_deployed_version(p.id)
);
const image = () => {
if (deployment().deployment.build_id) {
const build = builds.get(deployment().deployment.build_id!)!;
if (deployment().state === DockerContainerState.NotDeployed) {
if (deployment().state === DockerContainerState.NotDeployed) {
if (deployment().deployment.build_id) {
const build = builds.get(deployment().deployment.build_id!);
if (build === undefined) return "unknown"
const version = deployment().deployment.build_version
? readableVersion(deployment().deployment.build_version!).replaceAll(
"v",
@@ -32,15 +28,18 @@ const Deployment: Component<{ id: string }> = (p) => {
: "latest";
return `${build.name}:${version}`;
} else {
return deployed_version() && `${build.name}:${deployed_version()}`;
return deployment().deployment.docker_run_args.image || "unknown";
}
} else if (deployment().container?.image) {
let [account, image] = deployment().container!.image.split("/");
return image ? image : account;
} else {
return deployment().deployment.docker_run_args.image || "unknown";
return "unknown";
}
};
return (
<Show when={deployment()}>
<A href={`/deployment/${p.id}`} class={combineClasses(s.DropdownItem)}>
<A href={`/deployment/${p.id}`} class="card hoverable" style={{ width: "100%", "justify-content": "space-between", padding: "0.5rem" }}>
<Grid gap="0">
<h2>{deployment().deployment.name}</h2>
<div style={{ opacity: 0.7 }}>{image()}</div>

View File

@@ -33,7 +33,6 @@ const ServerChildren: Component<{ id: string }> = (p) => {
<div class="card shadow">
<Grid
gap=".5rem"
class={combineClasses(s.Deployments)}
gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}
>
<For each={deploymentIDs()}>{(id) => <Deployment id={id} />}</For>

View File

@@ -1,16 +1,16 @@
@use "../../style/colors.scss" as c;
.Deployments {
background-color: c.$lightgrey;
transform-origin: top;
padding: 0.5rem;
}
// .Deployments {
// // background-color: c.$lightgrey;
// // transform-origin: top;
// // padding: 0.5rem;
// }
.DropdownItem {
padding: 0.5rem 1rem;
// padding: 0.5rem 1rem;
width: 100%;
justify-content: space-between;
transition: background-color 500ms ease;
// transition: background-color 500ms ease;
}
.DropdownItem:hover {

View File

@@ -2,7 +2,9 @@ import { Component, createSignal, JSX } from "solid-js";
import { pushNotification } from "../..";
import { useToggle } from "../../util/hooks";
import ConfirmButton from "./ConfirmButton";
import CopyClipboard from "./CopyClipboard";
import Input from "./Input";
import Flex from "./layout/Flex";
import Grid from "./layout/Grid";
import CenterMenu from "./menu/CenterMenu";
@@ -22,6 +24,12 @@ const ConfirmMenuButton: Component<{
show={show}
toggleShow={toggleShow}
title={p.title}
leftOfX={() => (
<Flex alignItems="center" justifyContent="space-between" style={{ width: "100%" }}>
<h1>{p.match}</h1>
<CopyClipboard copyText={p.match} copying="name" />
</Flex>
)}
targetClass={p.class}
target={p.children}
content={() => (

View File

@@ -0,0 +1,22 @@
import { Component } from "solid-js";
import { pushNotification } from "../..";
import { copyToClipboard } from "../../util/helpers";
import ConfirmButton from "./ConfirmButton";
import Icon from "./Icon";
// Reusable copy-to-clipboard button.
// props:
//   copyText - the text written to the clipboard on click
//   copying  - label for the success notification ("copied <copying> to
//              clipboard"); defaults to "text" when omitted
const CopyClipboard: Component<{ copyText: string; copying?: string; }> = (p) => {
return (
<ConfirmButton
class="blue"
onFirstClick={() => {
copyToClipboard(p.copyText);
pushNotification("good", `copied ${p.copying || "text"} to clipboard`);
}}
confirm={<Icon type="check" />}
>
<Icon type="clipboard" />
</ConfirmButton>
);
}
export default CopyClipboard;

View File

@@ -17,6 +17,7 @@ const TextArea: Component<
onBlur={(e) => p.onConfirm && p.onConfirm(e.currentTarget.value)}
onKeyDown={(e) => {
if (e.key === "Enter" && p.onEnter) {
e.preventDefault();
p.onEnter(e.currentTarget.value);
}
}}

View File

@@ -68,7 +68,7 @@ const Child: Component<{
return (
<Grid
class={combineClasses(s.CenterMenuContainer)}
onClick={(e) => {
onPointerDown={(e) => {
e.stopPropagation();
p.toggleShow();
}}
@@ -78,6 +78,7 @@ const Child: Component<{
class={combineClasses(s.Menu, "shadow")}
style={{ padding: (p.padding as any) || "1rem", ...p.style }}
onClick={(e) => e.stopPropagation()}
onPointerDown={(e) => e.stopPropagation()}
>
<Flex
class={s.CenterMenuHeader}
@@ -85,7 +86,7 @@ const Child: Component<{
justifyContent="space-between"
alignItems="center"
>
<div class={s.CenterMenuTitle}>{p.title}</div>
<h1>{p.title}</h1>
<Flex alignItems="center">
{p.leftOfX && p.leftOfX()}
<button class="red" onClick={p.toggleShow}>

View File

@@ -142,11 +142,6 @@ $anim-time: 350ms;
background-color: rgba(0, 0, 0, 0.4);
}
.CenterMenuTitle {
font-size: 1.5rem;
font-weight: 500;
}
.SelectorItem:hover {
background-color: c.$lightgrey;
}

View File

@@ -1,11 +1,12 @@
.NotificationProvider {
position: absolute;
position: sticky;
left: 50vw;
top: 5px;
top: 0.5rem;
background-color: transparent;
display: flex;
flex-direction: column;
z-index: 9000;
height: 0px;
}
.Notification {

View File

@@ -15,25 +15,26 @@ import { AppStateProvider } from "./state/StateProvider";
export const TOPBAR_HEIGHT = 50;
export const MAX_PAGE_WIDTH = 1200;
export const URL =
export const MONITOR_BASE_URL =
import.meta.env.MODE === "production"
? location.origin
: (import.meta.env.VITE_MONITOR_HOST as string) || "http://localhost:9000";
export const UPDATE_WS_URL = URL.replace("http", "ws") + "/ws/update";
export const UPDATE_WS_URL = MONITOR_BASE_URL.replace("http", "ws") + "/ws/update";
const token =
(import.meta.env.VITE_ACCESS_TOKEN as string) ||
localStorage.getItem("access_token") ||
null;
export const client = new Client(URL, token);
export const client = new Client(MONITOR_BASE_URL, token);
export const { Notifications, pushNotification } = makeNotifications();
client.initialize().then(() => {
render(
() => [
<Notifications />,
<DimensionProvider>
<UserProvider>
<LoginGuard>
@@ -45,7 +46,6 @@ client.initialize().then(() => {
</LoginGuard>
</UserProvider>
</DimensionProvider>,
<Notifications />,
],
document.getElementById("root") as HTMLElement
);

View File

@@ -1,5 +1,14 @@
@use "colors" as c;
.app-bounder {
display: grid;
grid-template-columns: 1fr;
width: 100vw;
max-width: calc(1200px + 2rem);
box-sizing: border-box;
place-items: center;
}
.app {
display: grid;
grid-template-columns: 1fr;
@@ -28,6 +37,7 @@
.card {
background-color: c.$grey;
padding: 1rem;
transition: all 250ms ease-in-out;
}
.card.light {
@@ -38,6 +48,10 @@
background-color: c.$darkgrey;
}
.card.hoverable:hover {
background-color: rgba(c.$lightblue, 0.5);
}
.content {
grid-template-columns: auto 1fr;
background-color: c.$darkgrey;

View File

@@ -38,7 +38,7 @@ h1 {
h2 {
font-size: 1.1rem;
font-weight: 450;
user-select: none;
// user-select: none;
margin: 0;
}
@@ -294,7 +294,6 @@ svg {
}
.ellipsis {
width: 100%;
text-overflow: ellipsis;
overflow: hidden;
white-space: nowrap;
@@ -313,4 +312,12 @@ svg {
.wrap {
flex-wrap: wrap;
}
}
// .hoverable {
// transition: all 250ms ease-in-out;
// }
// .hoverable:hover {
// background-color: rgba(c.$lightblue, 0.5);
// }

View File

@@ -9,6 +9,7 @@ export type PermissionsMap = Record<string, PermissionLevel>;
export interface Action {
_id?: string;
name: string;
description?: string;
path: string;
command: string;
server_ids?: string[];
@@ -22,6 +23,7 @@ export interface Action {
export interface Build {
_id?: string;
name: string;
description?: string;
permissions?: PermissionsMap;
server_id?: string;
aws_config?: AwsBuilderBuildConfig;
@@ -32,6 +34,7 @@ export interface Build {
pre_build?: Command;
docker_build_args?: DockerBuildArgs;
docker_account?: string;
docker_organization?: string;
last_built_at?: string;
created_at?: string;
updated_at?: string;
@@ -62,7 +65,7 @@ export interface BuildVersionsReponse {
export interface AwsBuilderBuildConfig {
region?: string;
instance_type?: string;
ami_id?: string;
ami_name?: string;
volume_gb?: number;
subnet_id?: string;
security_group_ids?: string[];
@@ -73,7 +76,7 @@ export interface AwsBuilderBuildConfig {
export interface AwsBuilderConfig {
access_key_id: string;
secret_access_key: string;
default_ami_id: string;
default_ami_name: string;
default_subnet_id: string;
default_key_pair_name: string;
available_ami_accounts?: AvailableAmiAccounts;
@@ -85,7 +88,7 @@ export interface AwsBuilderConfig {
}
export interface AmiAccounts {
name: string;
ami_id: string;
github?: string[];
docker?: string[];
}
@@ -93,6 +96,7 @@ export interface AmiAccounts {
export interface Deployment {
_id?: string;
name: string;
description?: string;
server_id: string;
permissions?: PermissionsMap;
docker_run_args: DockerRunArgs;
@@ -140,6 +144,7 @@ export interface DockerRunArgs {
export interface BasicContainerInfo {
name: string;
id: string;
image: string;
state: DockerContainerState;
status?: string;
}
@@ -162,6 +167,7 @@ export interface DockerContainerStats {
export interface Group {
_id?: string;
name: string;
description?: string;
permissions?: PermissionsMap;
builds: string[];
deployments: string[];
@@ -190,6 +196,7 @@ export interface UserCredentials {
export interface Procedure {
_id?: string;
name: string;
description?: string;
stages?: ProcedureStage[];
webhook_branches?: string[];
permissions?: PermissionsMap;
@@ -205,6 +212,7 @@ export interface ProcedureStage {
export interface Server {
_id?: string;
name: string;
description?: string;
address: string;
permissions?: PermissionsMap;
enabled: boolean;

View File

@@ -1,6 +1,6 @@
import axios from "axios";
import fileDownload from "js-file-download";
import { URL } from "..";
import { MONITOR_BASE_URL } from "..";
import {
AwsBuilderConfig,
BasicContainerInfo,
@@ -43,6 +43,7 @@ import {
ModifyUserCreateServerBody,
ModifyUserEnabledBody,
PermissionsUpdateBody,
UpdateDescriptionBody,
} from "./client_types";
import { generateQuery, QueryObject } from "./helpers";
@@ -56,7 +57,7 @@ export class Client {
const params = new URLSearchParams(location.search);
const exchange_token = params.get("token");
if (exchange_token) {
history.replaceState({}, "", URL);
history.replaceState({}, "", MONITOR_BASE_URL);
try {
const jwt = await this.exchange_for_jwt(exchange_token);
this.token = jwt;
@@ -72,11 +73,11 @@ export class Client {
}
login_with_github() {
location.replace(`${URL}/auth/github/login`);
location.replace(`${MONITOR_BASE_URL}/auth/github/login`);
}
login_with_google() {
location.replace(`${URL}/auth/google/login`);
location.replace(`${MONITOR_BASE_URL}/auth/google/login`);
}
async login(credentials: UserCredentials) {
@@ -111,11 +112,11 @@ export class Client {
}
}
async get_username(user_id: string): Promise<string> {
get_username(user_id: string): Promise<string> {
return this.get(`/api/username/${user_id}`);
}
async list_users(): Promise<User[]> {
list_users(): Promise<User[]> {
return this.get("/api/users");
}
@@ -123,6 +124,14 @@ export class Client {
return this.post("/auth/exchange", { token: exchange_token });
}
get_github_webhook_base_url(): Promise<string> {
return this.get("/api/github_webhook_base_url");
}
update_description(body: UpdateDescriptionBody): Promise<undefined> {
return this.post("/api/update_description", body);
}
// deployment
list_deployments(
@@ -361,6 +370,10 @@ export class Client {
return this.get("/api/build/aws_builder_defaults");
}
get_docker_organizations(): Promise<string[]> {
return this.get("/api/build/docker_organizations");
}
// procedure
list_procedures(query?: QueryObject): Promise<Procedure[]> {
@@ -470,8 +483,8 @@ export class Client {
return this.post("/api/permissions/modify_create_build", body);
}
async get<R = any>(url: string): Promise<R> {
return await axios({
get<R = any>(url: string): Promise<R> {
return axios({
method: "get",
url: this.baseURL + url,
headers: {
@@ -480,8 +493,8 @@ export class Client {
}).then(({ data }) => data);
}
async post<B = any, R = any>(url: string, body?: B): Promise<R> {
return await axios({
post<B = any, R = any>(url: string, body?: B): Promise<R> {
return axios({
method: "post",
url: this.baseURL + url,
headers: {
@@ -491,8 +504,8 @@ export class Client {
}).then(({ data }) => data);
}
async patch<B = any, R = any>(url: string, body: B): Promise<R> {
return await axios({
patch<B = any, R = any>(url: string, body: B): Promise<R> {
return axios({
method: "patch",
url: this.baseURL + url,
headers: {
@@ -502,8 +515,8 @@ export class Client {
}).then(({ data }) => data);
}
async delete<R = any>(url: string): Promise<R> {
return await axios({
delete<R = any>(url: string): Promise<R> {
return axios({
method: "delete",
url: this.baseURL + url,
headers: {

View File

@@ -2,7 +2,7 @@
Generated by typeshare 1.0.0
*/
import { PermissionLevel, PermissionsTarget } from "../types";
import { PermissionLevel, PermissionsTarget, UpdateTarget } from "../types";
export interface CreateBuildBody {
name: string;
@@ -37,6 +37,11 @@ export interface CreateGroupBody {
name: string;
}
export interface UpdateDescriptionBody {
target: UpdateTarget;
description: string;
}
export interface PermissionsUpdateBody {
user_id: string;
permission: PermissionLevel;

View File

@@ -1,6 +1,6 @@
[package]
name = "db_client"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_helpers"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
authors = ["MoghTech"]
description = "helpers used as dependency for mogh tech monitor"
@@ -9,20 +9,10 @@ license = "GPL-3.0-or-later"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = "1.25"
types = { package = "monitor_types", path = "../types" }
periphery_client = { path = "../periphery_client" }
async_timing_util = "0.1.14"
bollard = "0.13"
anyhow = "1.0"
axum = { version = "0.6", features = ["ws", "json"] }
axum = "0.6"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
toml = "0.7"
run_command = { version = "0.0.5", features = ["async_tokio"] }
rand = "0.8"
futures = "0.3"
futures-util = "0.3.25"
aws-config = "0.54"
aws-sdk-ec2 = "0.24"

View File

@@ -1,127 +0,0 @@
use std::sync::Arc;
use anyhow::anyhow;
use axum::Extension;
use bollard::{
container::{ListContainersOptions, StatsOptions},
Docker,
};
use futures::{future::join_all, Stream};
use futures_util::stream::StreamExt;
use types::{BasicContainerInfo, DockerContainerState, ImageSummary, Network};
pub use bollard::container::Stats;
pub type DockerExtension = Extension<Arc<DockerClient>>;
/// Wrapper around a bollard [`Docker`] connection to the local daemon,
/// shared across axum handlers as an `Extension<Arc<DockerClient>>`
/// (see `DockerExtension`).
pub struct DockerClient {
docker: Docker,
}
impl DockerClient {
/// Connects to the local docker daemon and wraps the client in an axum
/// `Extension` for injection into request handlers.
///
/// # Panics
/// Panics if the connection to the docker daemon cannot be established.
pub fn extension() -> DockerExtension {
let client = DockerClient {
docker: Docker::connect_with_local_defaults()
.expect("failed to connect to docker daemon"),
};
Extension(Arc::new(client))
}
/// Lists all containers on the host (including stopped ones, via
/// `all: true`) mapped into [`BasicContainerInfo`].
///
/// # Errors
/// Returns an error if the daemon call fails or a container has no name.
pub async fn list_containers(&self) -> anyhow::Result<Vec<BasicContainerInfo>> {
let res = self
.docker
.list_containers(Some(ListContainersOptions::<String> {
all: true,
..Default::default()
}))
.await?
.into_iter()
.map(|s| {
let info = BasicContainerInfo {
id: s.id.unwrap_or_default(),
name: s
.names
.ok_or(anyhow!("no names on container"))?
.pop()
.ok_or(anyhow!("no names on container (empty vec)"))?
// strip the leading '/' the daemon puts on container names
.replace("/", ""),
// NOTE(review): these unwraps assume the daemon always reports
// a present, parseable state — will panic otherwise
state: s.state.unwrap().parse().unwrap(),
status: s.status,
};
Ok::<_, anyhow::Error>(info)
})
.collect::<anyhow::Result<Vec<BasicContainerInfo>>>()?;
Ok(res)
}
/// Returns a continuous stats stream for one container
/// (`stream: true`; one `Stats` item per daemon tick).
pub fn container_stats_stream(
&self,
container_name: &str,
) -> impl Stream<Item = Result<Stats, bollard::errors::Error>> {
self.docker.stats(
container_name,
Some(StatsOptions {
stream: true,
..Default::default()
}),
)
}
/// Fetches a single (non-streaming) stats snapshot for `container_name`.
///
/// # Errors
/// Errors if the stream yields nothing or the daemon call fails
/// (the double `?` unwraps both the "no item" and the item's error case).
pub async fn get_container_stats(&self, container_name: &str) -> anyhow::Result<Stats> {
let mut stats = self
.docker
.stats(
container_name,
Some(StatsOptions {
stream: false,
..Default::default()
}),
)
.take(1)
.next()
.await
.ok_or(anyhow!("got no stats for {container_name}"))??;
// strip the leading '/' the daemon puts on container names
stats.name = stats.name.replace("/", "");
Ok(stats)
}
/// Fetches one stats snapshot for every currently running container,
/// issuing the per-container requests concurrently via `join_all`.
///
/// # Errors
/// Fails if listing containers fails or any single stats fetch fails
/// (the collect short-circuits on the first error).
pub async fn get_container_stats_list(&self) -> anyhow::Result<Vec<Stats>> {
let futures = self
.list_containers()
.await?
.into_iter()
.filter(|c| c.state == DockerContainerState::Running)
.map(|c| async move {
let mut stats = self
.docker
.stats(
&c.name,
Some(StatsOptions {
stream: false,
..Default::default()
}),
)
.take(1)
.next()
.await
.ok_or(anyhow!("got no stats for {}", c.name))??;
// strip the leading '/' the daemon puts on container names
stats.name = stats.name.replace("/", "");
Ok::<_, anyhow::Error>(stats)
});
let stats = join_all(futures)
.await
.into_iter()
.collect::<anyhow::Result<_>>()?;
Ok(stats)
}
/// Lists all docker networks on the host.
pub async fn list_networks(&self) -> anyhow::Result<Vec<Network>> {
let networks = self.docker.list_networks::<String>(None).await?;
Ok(networks)
}
/// Lists all docker images on the host.
pub async fn list_images(&self) -> anyhow::Result<Vec<ImageSummary>> {
let images = self.docker.list_images::<String>(None).await?;
Ok(images)
}
}

View File

@@ -1,15 +1,29 @@
use std::{fs::File, io::Read, net::SocketAddr, str::FromStr};
use std::{borrow::Borrow, fs::File, io::Read, net::SocketAddr, str::FromStr};
use anyhow::Context;
use anyhow::{anyhow, Context};
use axum::http::StatusCode;
use rand::{distributions::Alphanumeric, Rng};
use run_command::{async_run_command, CommandOutput};
use serde::de::DeserializeOwned;
use types::{monitor_timestamp, Log};
use serde_json::{Map, Value};
use types::Log;
pub mod aws;
pub mod docker;
pub mod git;
pub fn parse_config_files<'a, T: DeserializeOwned>(
paths: impl IntoIterator<Item = impl Borrow<String>>,
merge_nested: bool,
extend_array: bool,
) -> anyhow::Result<T> {
let mut target = Map::new();
for path in paths {
target = merge_objects(
target,
parse_config_file(path.borrow())?,
merge_nested,
extend_array,
)?;
}
serde_json::from_str(&serde_json::to_string(&target)?)
.context("failed to parse final config into expected type")
}
pub fn parse_config_file<T: DeserializeOwned>(path: &str) -> anyhow::Result<T> {
let mut file = File::open(&path).expect(&format!("failed to find config at {path}"));
@@ -26,22 +40,88 @@ pub fn parse_config_file<T: DeserializeOwned>(path: &str) -> anyhow::Result<T> {
Ok(config)
}
pub fn output_into_log(
stage: &str,
command: String,
start_ts: String,
output: CommandOutput,
) -> Log {
let success = output.success();
Log {
stage: stage.to_string(),
stdout: output.stdout,
stderr: output.stderr,
command,
success,
start_ts,
end_ts: monitor_timestamp(),
/// object is serde_json::Map<String, serde_json::Value>
/// source will overide target
/// will recurse when field is object if merge_object = true, otherwise object will be replaced
/// will extend when field is array if extend_array = true, otherwise array will be replaced
/// will return error when types on source and target fields do not match
fn merge_objects(
mut target: Map<String, Value>,
source: Map<String, Value>,
merge_nested: bool,
extend_array: bool,
) -> anyhow::Result<Map<String, Value>> {
for (key, value) in source {
let curr = target.remove(&key);
if curr.is_none() {
target.insert(key, value);
continue;
}
let curr = curr.unwrap();
match curr {
Value::Object(target_obj) => {
if !merge_nested {
target.insert(key, value);
continue;
}
match value {
Value::Object(source_obj) => {
target.insert(
key,
Value::Object(merge_objects(
target_obj,
source_obj,
merge_nested,
extend_array,
)?),
);
}
_ => {
return Err(anyhow!(
"types on field {key} do not match. got {value:?}, expected object"
))
}
}
}
Value::Array(mut target_arr) => {
if !extend_array {
target.insert(key, value);
continue;
}
match value {
Value::Array(source_arr) => {
target_arr.extend(source_arr);
target.insert(key, Value::Array(target_arr));
}
_ => {
return Err(anyhow!(
"types on field {key} do not match. got {value:?}, expected array"
))
}
}
}
_ => {
target.insert(key, value);
}
}
}
Ok(target)
}
pub fn parse_comma_seperated_list<T: FromStr>(
comma_sep_list: impl Borrow<str>,
) -> anyhow::Result<Vec<T>> {
comma_sep_list
.borrow()
.split(",")
.filter(|item| item.len() > 0)
.map(|item| {
let item = item
.parse()
.map_err(|_| anyhow!("error parsing string {item} into type T"))?;
Ok::<T, anyhow::Error>(item)
})
.collect()
}
pub fn get_socket_addr(port: u16) -> SocketAddr {
@@ -52,12 +132,6 @@ pub fn to_monitor_name(name: &str) -> String {
name.to_lowercase().replace(" ", "_")
}
pub async fn run_monitor_command(stage: &str, command: String) -> Log {
let start_ts = monitor_timestamp();
let output = async_run_command(&command).await;
output_into_log(stage, command, start_ts, output)
}
pub fn handle_anyhow_error(err: anyhow::Error) -> (StatusCode, String) {
(
StatusCode::INTERNAL_SERVER_ERROR,

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_client"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
authors = ["MoghTech"]
description = "a client to interact with the monitor system"
@@ -9,7 +9,7 @@ license = "GPL-3.0-or-later"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
monitor_types = "0.2.0"
monitor_types = "0.2.7"
# monitor_types = { path = "../types" }
reqwest = { version = "0.11", features = ["json"] }
tokio-tungstenite = { version = "0.18", features=["native-tls"] }

View File

@@ -97,6 +97,12 @@ impl MonitorClient {
.context("failed at getting aws builder defaults")
}
pub async fn get_docker_organizations(&self) -> anyhow::Result<Vec<String>> {
self.get("/api/build/docker_organizations", Option::<()>::None)
.await
.context("failed at getting docker organizations")
}
// pub async fn reclone_build(&self, id: &str) -> anyhow::Result<Update> {
// self.post::<(), _>(&format!("/api/build/{id}/reclone"), None)
// .await

View File

@@ -10,6 +10,7 @@ pub use futures_util;
pub use tokio_tungstenite;
pub use monitor_types as types;
use types::UpdateTarget;
mod build;
mod deployment;
@@ -119,19 +120,44 @@ impl MonitorClient {
json!({ "username": username.into(), "password": password.into() }),
)
.await
.context("failed at call to create_user")
}
pub async fn get_user(&self) -> anyhow::Result<User> {
self.get("/api/user", Option::<()>::None).await
self.get("/api/user", Option::<()>::None)
.await
.context("failed at call to get_user")
}
pub async fn get_username(&self, user_id: &str) -> anyhow::Result<String> {
self.get_string(&format!("/api/username/{user_id}"), Option::<()>::None)
.await
.context("failed at call to get_username")
}
pub async fn list_users(&self) -> anyhow::Result<Vec<User>> {
self.get("/api/users", Option::<()>::None).await
self.get("/api/users", Option::<()>::None)
.await
.context("failed at call to list_users")
}
pub async fn get_github_webhook_base_url(&self) -> anyhow::Result<String> {
self.get("/api/github_webhook_base_url", Option::<()>::None)
.await
.context("failed at call to get_github_webhook_base_url")
}
pub async fn update_description(
&self,
target: UpdateTarget,
description: &str,
) -> anyhow::Result<()> {
self.post(
"/api/update_description",
json!({ "target": target, "description": description }),
)
.await
.context("failed at call to update_description")
}
async fn get<R: DeserializeOwned>(

View File

@@ -1,6 +1,6 @@
[package]
name = "periphery_client"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_types"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
authors = ["MoghTech"]
description = "types for the mogh tech monitor"
@@ -15,7 +15,7 @@ bson = "2.4"
strum = "0.24"
strum_macros = "0.24"
diff-struct = "0.5"
bollard = "0.13"
bollard = "0.14.0"
derive_builder = "0.12"
typeshare = "1.0.0"
chrono = "0.4"

View File

@@ -23,6 +23,11 @@ pub struct Action {
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub name: String,
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub description: String,
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub path: String,

View File

@@ -27,6 +27,11 @@ pub struct Build {
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub name: String,
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub description: String,
#[serde(default)]
#[diff(attr(#[serde(skip_serializing)]))]
#[builder(setter(skip))]
@@ -69,6 +74,10 @@ pub struct Build {
#[diff(attr(#[serde(skip_serializing_if = "option_diff_no_change")]))]
pub docker_account: Option<String>,
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "option_diff_no_change")]))]
pub docker_organization: Option<String>,
#[serde(default, skip_serializing_if = "String::is_empty")]
#[diff(attr(#[serde(skip)]))]
#[builder(setter(skip))]
@@ -164,7 +173,7 @@ pub struct AwsBuilderBuildConfig {
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "option_diff_no_change")]))]
pub ami_id: Option<String>,
pub ami_name: Option<String>,
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "option_diff_no_change")]))]

View File

@@ -43,6 +43,9 @@ pub struct CoreConfig {
// used to verify validity from github webhooks
pub github_webhook_secret: String,
// used to form the frontend listener url, if None will use 'host'.
pub github_webhook_base_url: Option<String>,
// sent in auth header with req to periphery
pub passkey: String,
@@ -52,6 +55,10 @@ pub struct CoreConfig {
// enable login with local auth
pub local_auth: bool,
// allowed docker orgs used with monitor. first in this list will be default for build
#[serde(default)]
pub docker_organizations: Vec<String>,
pub mongo: MongoConfig,
#[serde(default)]
@@ -108,7 +115,7 @@ pub struct AwsBuilderConfig {
#[serde(skip_serializing)]
pub secret_access_key: String,
pub default_ami_id: String,
pub default_ami_name: String,
pub default_subnet_id: String,
pub default_key_pair_name: String,
@@ -144,12 +151,12 @@ fn default_instance_type() -> String {
}
#[typeshare]
pub type AvailableAmiAccounts = HashMap<String, AmiAccounts>; // (ami_id, AmiAccounts)
pub type AvailableAmiAccounts = HashMap<String, AmiAccounts>; // (ami_name, AmiAccounts)
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct AmiAccounts {
pub name: String,
pub ami_id: String,
#[serde(default)]
pub github: Vec<String>,
#[serde(default)]

View File

@@ -24,6 +24,11 @@ pub struct Deployment {
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub name: String, // must be formatted to be compat with docker
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub description: String,
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub server_id: String,
@@ -176,6 +181,7 @@ fn default_network() -> String {
pub struct BasicContainerInfo {
pub name: String,
pub id: String,
pub image: String,
pub state: DockerContainerState,
pub status: Option<String>,
}

View File

@@ -23,6 +23,11 @@ pub struct Group {
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub name: String,
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub description: String,
#[serde(default)]
#[diff(attr(#[serde(skip_serializing)]))]
#[builder(setter(skip))]

View File

@@ -23,6 +23,11 @@ pub struct Procedure {
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub name: String,
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub description: String,
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "vec_diff_no_change")]))]

View File

@@ -26,6 +26,11 @@ pub struct Server {
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub name: String,
#[serde(default)]
#[builder(default)]
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub description: String,
#[diff(attr(#[serde(skip_serializing_if = "Option::is_none")]))]
pub address: String,
@@ -94,7 +99,7 @@ impl Default for Server {
address: Default::default(),
permissions: Default::default(),
enabled: true,
auto_prune: false,
auto_prune: true,
to_notify: Default::default(),
cpu_alert: default_cpu_alert(),
mem_alert: default_mem_alert(),
@@ -102,6 +107,7 @@ impl Default for Server {
stats_interval: Default::default(),
region: Default::default(),
instance_id: Default::default(),
description: Default::default(),
created_at: Default::default(),
updated_at: Default::default(),
}

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_periphery"
version = "0.2.0"
version = "0.2.7"
edition = "2021"
authors = ["MoghTech"]
description = "monitor periphery binary | run monitor periphery as system daemon"
@@ -18,18 +18,19 @@ types = { package = "monitor_types", path = "../lib/types" }
run_command = { version = "0.0.5", features = ["async_tokio"] }
async_timing_util = "0.1.14"
tokio = { version = "1.25", features = ["full"] }
# tokio-util = "0.7"
axum = { version = "0.6", features = ["ws"] }
tower = { version = "0.4", features = ["full"] }
futures = "0.3"
# futures-util = "0.3.25"
dotenv = "0.15"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
bollard = "0.13"
bollard = "0.14.0"
anyhow = "1.0"
envy = "0.4"
sysinfo = "0.28"
toml = "0.7"
daemonize = "0.4"
clap = { version = "4.0", features = ["derive"] }
futures-util = "0.3"
tokio-util = "0.7"
daemonize = "0.5.0"
clap = { version = "4.1", features = ["derive"] }

View File

@@ -1,9 +1,12 @@
use anyhow::Context;
use axum::{routing::post, Extension, Json, Router};
use helpers::{docker, handle_anyhow_error};
use helpers::handle_anyhow_error;
use types::{Build, Log};
use crate::{helpers::get_docker_token, PeripheryConfigExtension};
use crate::{
helpers::{docker, get_docker_token},
PeripheryConfigExtension,
};
pub fn router() -> Router {
Router::new().route(

View File

@@ -1,9 +1,9 @@
use anyhow::Context;
use axum::{routing::post, Json, Router};
use helpers::{handle_anyhow_error, run_monitor_command};
use helpers::handle_anyhow_error;
use types::Command;
use crate::HomeDirExtension;
use crate::{helpers::run_monitor_command, HomeDirExtension};
pub fn router() -> Router {
Router::new().route(

View File

@@ -4,14 +4,17 @@ use axum::{
routing::{get, post},
Extension, Json, Router,
};
use helpers::{
docker::{self, DockerExtension},
handle_anyhow_error, to_monitor_name,
};
use helpers::{handle_anyhow_error, to_monitor_name};
use serde::Deserialize;
use types::{Deployment, Log};
use crate::{helpers::get_docker_token, response, PeripheryConfigExtension};
use crate::{
helpers::{
docker::{self, DockerExtension},
get_docker_token,
},
response, PeripheryConfigExtension,
};
#[derive(Deserialize)]
struct Container {

View File

@@ -1,9 +1,12 @@
use axum::{routing::post, Extension, Json, Router};
use helpers::{git, handle_anyhow_error, to_monitor_name};
use helpers::{handle_anyhow_error, to_monitor_name};
use serde::Deserialize;
use types::{CloneArgs, Command, Log};
use crate::{helpers::get_github_token, PeripheryConfigExtension};
use crate::{
helpers::{get_github_token, git},
PeripheryConfigExtension,
};
#[derive(Deserialize)]
pub struct DeleteRepoBody {

View File

@@ -2,12 +2,12 @@ use axum::{
routing::{get, post},
Extension, Json, Router,
};
use helpers::{
docker::{self, DockerExtension},
handle_anyhow_error,
};
use helpers::handle_anyhow_error;
use crate::response;
use crate::{
helpers::docker::{self, DockerExtension},
response,
};
pub fn router() -> Router {
Router::new()

View File

@@ -9,11 +9,10 @@ use axum::{
routing::get,
Json, RequestExt, Router,
};
use helpers::docker::DockerClient;
use serde_json::Value;
use types::{monitor_timestamp, PeripheryConfig};
use crate::{HomeDirExtension, PeripheryConfigExtension};
use crate::{helpers::docker::DockerClient, HomeDirExtension, PeripheryConfigExtension};
use self::stats::{StatsClient, StatsExtension};

View File

@@ -2,13 +2,13 @@ use axum::{
routing::{get, post},
Extension, Json, Router,
};
use helpers::{
docker::{self, DockerExtension},
handle_anyhow_error,
};
use helpers::handle_anyhow_error;
use serde::Deserialize;
use crate::response;
use crate::{
helpers::docker::{self, DockerExtension},
response,
};
#[derive(Deserialize, Clone)]
pub struct NetworkReqBody {

View File

@@ -4,45 +4,33 @@ use std::{
};
use async_timing_util::wait_until_timelength;
use axum::{
extract::{ws::Message, Query, WebSocketUpgrade},
response::IntoResponse,
routing::get,
Extension, Json, Router,
};
use futures_util::{SinkExt, StreamExt};
use axum::{extract::Query, routing::get, Extension, Json, Router};
use sysinfo::{ComponentExt, CpuExt, DiskExt, NetworkExt, PidExt, ProcessExt, SystemExt};
use tokio::{
select,
sync::broadcast::{self, Receiver},
};
use tokio_util::sync::CancellationToken;
use types::{
DiskUsage, SingleCpuUsage, SingleDiskUsage, SystemComponent, SystemInformation, SystemNetwork,
SystemProcess, SystemStats, SystemStatsQuery, Timelength,
};
pub fn router() -> Router {
Router::new()
.route(
"/",
get(
|sys: StatsExtension, Query(query): Query<SystemStatsQuery>| async move {
let stats = sys.read().unwrap().get_cached_stats(query);
Json(stats)
},
),
)
.route(
"/ws",
get(
|sys: StatsExtension,
Query(query): Query<SystemStatsQuery>,
ws: WebSocketUpgrade| async move {
sys.read().unwrap().ws_subscribe(ws, Arc::new(query))
},
),
)
Router::new().route(
"/",
get(
|sys: StatsExtension, Query(query): Query<SystemStatsQuery>| async move {
let stats = sys.read().unwrap().get_cached_stats(query);
Json(stats)
},
),
)
// .route(
// "/ws",
// get(
// |sys: StatsExtension,
// Query(query): Query<SystemStatsQuery>,
// ws: WebSocketUpgrade| async move {
// sys.read().unwrap().ws_subscribe(ws, Arc::new(query))
// },
// ),
// )
}
pub type StatsExtension = Extension<Arc<RwLock<StatsClient>>>;
@@ -54,7 +42,7 @@ pub struct StatsClient {
polling_rate: Timelength,
refresh_ts: u128,
refresh_list_ts: u128,
receiver: Receiver<SystemStats>,
// receiver: Receiver<SystemStats>,
}
const BYTES_PER_GB: f64 = 1073741824.0;
@@ -63,7 +51,7 @@ const BYTES_PER_KB: f64 = 1024.0;
impl StatsClient {
pub fn extension(polling_rate: Timelength) -> StatsExtension {
let (sender, receiver) = broadcast::channel::<SystemStats>(10);
// let (sender, receiver) = broadcast::channel::<SystemStats>(10);
let sys = sysinfo::System::new_all();
let client = StatsClient {
info: get_system_information(&sys),
@@ -72,7 +60,7 @@ impl StatsClient {
polling_rate,
refresh_ts: 0,
refresh_list_ts: 0,
receiver,
// receiver,
};
let client = Arc::new(RwLock::new(client));
let clone = client.clone();
@@ -86,9 +74,9 @@ impl StatsClient {
client.refresh_ts = ts;
client.cache = client.get_stats();
}
sender
.send(clone.read().unwrap().cache.clone())
.expect("failed to broadcast new stats to reciever");
// sender
// .send(clone.read().unwrap().cache.clone())
// .expect("failed to broadcast new stats to reciever");
}
});
let clone = client.clone();
@@ -103,66 +91,66 @@ impl StatsClient {
Extension(client)
}
fn ws_subscribe(
&self,
ws: WebSocketUpgrade,
query: Arc<SystemStatsQuery>,
) -> impl IntoResponse {
// println!("client subscribe");
let mut reciever = self.get_receiver();
ws.on_upgrade(|socket| async move {
let (mut ws_sender, mut ws_reciever) = socket.split();
let cancel = CancellationToken::new();
let cancel_clone = cancel.clone();
tokio::spawn(async move {
loop {
let mut stats = select! {
_ = cancel_clone.cancelled() => break,
stats = reciever.recv() => { stats.expect("failed to recv stats msg") }
};
if query.cpus {
stats.cpus = vec![]
}
if !query.disks {
stats.disk.disks = vec![]
}
if !query.components {
stats.components = vec![]
}
if !query.networks {
stats.networks = vec![]
}
if !query.processes {
stats.processes = vec![]
}
let _ = ws_sender
.send(Message::Text(serde_json::to_string(&stats).unwrap()))
.await;
}
});
while let Some(msg) = ws_reciever.next().await {
match msg {
Ok(msg) => match msg {
Message::Close(_) => {
// println!("client CLOSE");
cancel.cancel();
return;
}
_ => {}
},
Err(_) => {
// println!("client CLOSE");
cancel.cancel();
return;
}
}
}
})
}
// fn ws_subscribe(
// &self,
// ws: WebSocketUpgrade,
// query: Arc<SystemStatsQuery>,
// ) -> impl IntoResponse {
// // println!("client subscribe");
// let mut reciever = self.get_receiver();
// ws.on_upgrade(|socket| async move {
// let (mut ws_sender, mut ws_reciever) = socket.split();
// let cancel = CancellationToken::new();
// let cancel_clone = cancel.clone();
// tokio::spawn(async move {
// loop {
// let mut stats = select! {
// _ = cancel_clone.cancelled() => break,
// stats = reciever.recv() => { stats.expect("failed to recv stats msg") }
// };
// if query.cpus {
// stats.cpus = vec![]
// }
// if !query.disks {
// stats.disk.disks = vec![]
// }
// if !query.components {
// stats.components = vec![]
// }
// if !query.networks {
// stats.networks = vec![]
// }
// if !query.processes {
// stats.processes = vec![]
// }
// let _ = ws_sender
// .send(Message::Text(serde_json::to_string(&stats).unwrap()))
// .await;
// }
// });
// while let Some(msg) = ws_reciever.next().await {
// match msg {
// Ok(msg) => match msg {
// Message::Close(_) => {
// // println!("client CLOSE");
// cancel.cancel();
// return;
// }
// _ => {}
// },
// Err(_) => {
// // println!("client CLOSE");
// cancel.cancel();
// return;
// }
// }
// }
// })
// }
fn get_receiver(&self) -> Receiver<SystemStats> {
self.receiver.resubscribe()
}
// fn get_receiver(&self) -> Receiver<SystemStats> {
// self.receiver.resubscribe()
// }
fn refresh(&mut self) {
self.sys.refresh_cpu();

View File

@@ -3,7 +3,7 @@ use std::sync::Arc;
use axum::Extension;
use clap::Parser;
use dotenv::dotenv;
use helpers::parse_config_file;
use helpers::{parse_comma_seperated_list, parse_config_files};
use serde::Deserialize;
use types::PeripheryConfig;
@@ -25,9 +25,12 @@ pub struct Args {
#[arg(long, default_value = "~/.monitor/periphery.log.err")]
pub stderr: String,
/// Sets the path of config file to use
/// Sets the path of a config file to use. can use multiple times
#[arg(short, long)]
pub config_path: Option<String>,
pub config_path: Option<Vec<String>>,
#[arg(short, long)]
pub merge_nested_config: bool,
#[arg(short, long)]
pub home_dir: Option<String>,
@@ -39,7 +42,7 @@ pub struct Args {
#[derive(Deserialize)]
struct Env {
#[serde(default = "default_config_path")]
config_path: String,
config_paths: String,
}
pub fn load() -> (Args, u16, PeripheryConfigExtension, HomeDirExtension) {
@@ -51,15 +54,24 @@ pub fn load() -> (Args, u16, PeripheryConfigExtension, HomeDirExtension) {
std::process::exit(0)
}
let home_dir = get_home_dir(&args.home_dir);
let config_path = args
let config_paths = args
.config_path
.as_ref()
.unwrap_or(&env.config_path)
.replace("~", &home_dir);
let config =
parse_config_file::<PeripheryConfig>(&config_path).expect("failed to parse config file");
.unwrap_or(
&parse_comma_seperated_list(env.config_paths)
.expect("failed to parse config paths on environment into comma seperated list"),
)
.into_iter()
.map(|p| p.replace("~", &home_dir))
.collect();
let config = parse_config_files::<PeripheryConfig>(
&config_paths,
args.merge_nested_config,
args.merge_nested_config,
)
.expect("failed at parsing config");
let _ = std::fs::create_dir(&config.repo_dir);
print_startup_log(&config_path, &args, &config);
print_startup_log(config_paths, &args, &config);
(
args,
config.port,
@@ -68,25 +80,30 @@ pub fn load() -> (Args, u16, PeripheryConfigExtension, HomeDirExtension) {
)
}
fn print_startup_log(config_path: &str, args: &Args, config: &PeripheryConfig) {
println!("\nconfig path: {config_path}");
let mut config = config.clone();
config.github_accounts = config
fn print_startup_log(config_paths: Vec<String>, args: &Args, config: &PeripheryConfig) {
println!("\nconfig paths: {config_paths:?}");
let mut config_for_print = config.clone();
config_for_print.github_accounts = config_for_print
.github_accounts
.into_iter()
.map(|(a, _)| (a, "<SECRET>".to_string()))
.collect();
config.docker_accounts = config
config_for_print.docker_accounts = config_for_print
.docker_accounts
.into_iter()
.map(|(a, _)| (a, "<SECRET>".to_string()))
.collect();
config.secrets = config
config_for_print.secrets = config_for_print
.secrets
.into_iter()
.map(|(s, _)| (s, "<SECRET>".to_string()))
.collect();
println!("{config:#?}");
config_for_print.passkeys = config_for_print
.passkeys
.into_iter()
.map(|_| "<SECRET>".to_string())
.collect();
println!("{config_for_print:#?}");
if args.daemon {
println!("daemon mode enabled");
}
@@ -94,7 +111,7 @@ fn print_startup_log(config_path: &str, args: &Args, config: &PeripheryConfig) {
}
fn default_config_path() -> String {
"/config/periphery.config.toml".to_string()
"~/.monitor/periphery.config.toml".to_string()
}
fn get_home_dir(home_dir_arg: &Option<String>) -> String {

View File

@@ -1,9 +1,10 @@
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use helpers::to_monitor_name;
use types::{Build, DockerBuildArgs, EnvironmentVar, Log, Version};
use crate::{run_monitor_command, to_monitor_name};
use crate::helpers::run_monitor_command;
use super::docker_login;
@@ -18,6 +19,7 @@ pub async fn build(
version,
docker_build_args,
docker_account,
docker_organization,
..
}: &Build,
mut repo_dir: PathBuf,
@@ -36,32 +38,13 @@ pub async fn build(
.await
.context("failed to login to docker")?;
repo_dir.push(&name);
// let pull_logs = git::pull(repo_dir.clone(), branch, &None).await;
// if !all_logs_success(&pull_logs) {
// logs.extend(pull_logs);
// return Ok(logs);
// }
// logs.extend(pull_logs);
// if let Some(command) = pre_build {
// let dir = repo_dir.join(&command.path);
// let pre_build_log = run_monitor_command(
// "pre build",
// format!("cd {} && {}", dir.display(), command.command),
// )
// .await;
// if !pre_build_log.success {
// logs.push(pre_build_log);
// return Ok(logs);
// }
// logs.push(pre_build_log);
// }
let build_dir = repo_dir.join(build_path);
let dockerfile_path = match dockerfile_path {
Some(dockerfile_path) => dockerfile_path.to_owned(),
None => "Dockerfile".to_owned(),
};
let build_args = parse_build_args(build_args);
let image_name = get_image_name(docker_account, &name);
let image_name = get_image_name(&name, docker_account, docker_organization);
let image_tags = image_tags(&image_name, &version);
let docker_push = if using_account {
format!(" && docker image push --all-tags {image_name}")
@@ -77,10 +60,17 @@ pub async fn build(
Ok(logs)
}
fn get_image_name(docker_account: &Option<String>, name: &str) -> String {
match docker_account {
Some(docker_account) => format!("{docker_account}/{name}"),
None => name.to_string(),
fn get_image_name(
name: &str,
docker_account: &Option<String>,
docker_organization: &Option<String>,
) -> String {
match docker_organization {
Some(docker_org) => format!("{docker_org}/{name}"),
None => match docker_account {
Some(docker_account) => format!("{docker_account}/{name}"),
None => name.to_string(),
},
}
}

View File

@@ -0,0 +1,123 @@
use std::sync::Arc;
use anyhow::anyhow;
use axum::Extension;
use bollard::{container::ListContainersOptions, Docker};
use types::{BasicContainerInfo, ImageSummary, Network};
pub use bollard::container::Stats;
pub type DockerExtension = Extension<Arc<DockerClient>>;
pub struct DockerClient {
docker: Docker,
}
impl DockerClient {
pub fn extension() -> DockerExtension {
let client = DockerClient {
docker: Docker::connect_with_local_defaults()
.expect("failed to connect to docker daemon"),
};
Extension(Arc::new(client))
}
pub async fn list_containers(&self) -> anyhow::Result<Vec<BasicContainerInfo>> {
let res = self
.docker
.list_containers(Some(ListContainersOptions::<String> {
all: true,
..Default::default()
}))
.await?
.into_iter()
.map(|s| {
let info = BasicContainerInfo {
id: s.id.unwrap_or_default(),
name: s
.names
.ok_or(anyhow!("no names on container"))?
.pop()
.ok_or(anyhow!("no names on container (empty vec)"))?
.replace("/", ""),
image: s.image.unwrap_or(String::from("unknown")),
state: s.state.unwrap().parse().unwrap(),
status: s.status,
};
Ok::<_, anyhow::Error>(info)
})
.collect::<anyhow::Result<Vec<BasicContainerInfo>>>()?;
Ok(res)
}
// pub fn container_stats_stream(
// &self,
// container_name: &str,
// ) -> impl Stream<Item = Result<Stats, bollard::errors::Error>> {
// self.docker.stats(
// container_name,
// Some(StatsOptions {
// stream: true,
// ..Default::default()
// }),
// )
// }
// pub async fn get_container_stats(&self, container_name: &str) -> anyhow::Result<Stats> {
// let mut stats = self
// .docker
// .stats(
// container_name,
// Some(StatsOptions {
// stream: false,
// ..Default::default()
// }),
// )
// .take(1)
// .next()
// .await
// .ok_or(anyhow!("got no stats for {container_name}"))??;
// stats.name = stats.name.replace("/", "");
// Ok(stats)
// }
// pub async fn get_container_stats_list(&self) -> anyhow::Result<Vec<Stats>> {
// let futures = self
// .list_containers()
// .await?
// .into_iter()
// .filter(|c| c.state == DockerContainerState::Running)
// .map(|c| async move {
// let mut stats = self
// .docker
// .stats(
// &c.name,
// Some(StatsOptions {
// stream: false,
// ..Default::default()
// }),
// )
// .take(1)
// .next()
// .await
// .ok_or(anyhow!("got no stats for {}", c.name))??;
// stats.name = stats.name.replace("/", "");
// Ok::<_, anyhow::Error>(stats)
// });
// let stats = join_all(futures)
// .await
// .into_iter()
// .collect::<anyhow::Result<_>>()?;
// Ok(stats)
// }
pub async fn list_networks(&self) -> anyhow::Result<Vec<Network>> {
let networks = self.docker.list_networks::<String>(None).await?;
Ok(networks)
}
pub async fn list_images(&self) -> anyhow::Result<Vec<ImageSummary>> {
let images = self.docker.list_images::<String>(None).await?;
Ok(images)
}
}

View File

@@ -1,12 +1,13 @@
use std::path::PathBuf;
use anyhow::{anyhow, Context};
use helpers::to_monitor_name;
use run_command::async_run_command;
use types::{
Conversion, Deployment, DockerContainerStats, DockerRunArgs, EnvironmentVar, Log, RestartMode,
};
use crate::{run_monitor_command, to_monitor_name};
use crate::helpers::run_monitor_command;
use super::docker_login;

View File

@@ -1,6 +1,6 @@
use types::Log;
use crate::run_monitor_command;
use crate::helpers::run_monitor_command;
pub async fn create_network(name: &str, driver: Option<String>) -> Log {
let driver = match driver {

View File

@@ -2,9 +2,10 @@ use std::path::PathBuf;
use ::run_command::async_run_command;
use anyhow::anyhow;
use helpers::to_monitor_name;
use types::{monitor_timestamp, CloneArgs, Command, GithubToken, Log};
use crate::{run_monitor_command, to_monitor_name};
use super::run_monitor_command;
pub async fn pull(
mut path: PathBuf,
@@ -95,17 +96,22 @@ async fn clone(
let command = format!("git clone {repo_url} {destination}{branch}");
let start_ts = monitor_timestamp();
let output = async_run_command(&command).await;
let command = if access_token_at.len() > 0 {
command.replace(&access_token.unwrap(), "<TOKEN>")
let success = output.success();
let (command, stderr) = if access_token_at.len() > 0 {
let access_token = access_token.unwrap();
(
command.replace(&access_token, "<TOKEN>"),
output.stderr.replace(&access_token, "<TOKEN>"),
)
} else {
command
(command, output.stderr)
};
Log {
stage: "clone repo".to_string(),
command,
success: output.success(),
success,
stdout: output.stdout,
stderr: output.stderr,
stderr,
start_ts,
end_ts: monitor_timestamp(),
}

View File

@@ -1,5 +1,9 @@
use anyhow::anyhow;
use types::{DockerToken, GithubToken, PeripheryConfig};
use run_command::{async_run_command, CommandOutput};
use types::{monitor_timestamp, DockerToken, GithubToken, Log, PeripheryConfig};
pub mod docker;
pub mod git;
#[macro_export]
macro_rules! response {
@@ -37,3 +41,27 @@ pub fn get_docker_token(
None => Ok(None),
}
}
pub async fn run_monitor_command(stage: &str, command: String) -> Log {
let start_ts = monitor_timestamp();
let output = async_run_command(&command).await;
output_into_log(stage, command, start_ts, output)
}
pub fn output_into_log(
stage: &str,
command: String,
start_ts: String,
output: CommandOutput,
) -> Log {
let success = output.success();
Log {
stage: stage.to_string(),
stdout: output.stdout,
stderr: output.stderr,
command,
success,
start_ts,
end_ts: monitor_timestamp(),
}
}

View File

@@ -8,4 +8,5 @@ a tool to build and deploy software across many servers
2. [connecting servers](https://github.com/mbecker20/monitor/blob/main/docs/servers.md)
3. [building](https://github.com/mbecker20/monitor/blob/main/docs/builds.md)
4. [deploying](https://github.com/mbecker20/monitor/blob/main/docs/deployments.md)
5. [permissioning](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md)
5. [permissioning](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md)
6. [core setup](https://github.com/mbecker20/monitor/blob/main/docs/setup.md)