Compare commits

...

90 Commits
v0.2.12 ... v0

Author SHA1 Message Date
mbecker20
9c0be07ae1 refactor caching to use custom Cache struct 2023-05-28 08:31:18 +00:00
mbecker20
ab945aadde fix set termination timeout 0 2023-05-25 20:18:39 +00:00
mbecker20
c1c461c273 add command crud and run api 2023-05-24 07:45:52 +00:00
mbecker20
336742ee69 update author and clap args 2023-05-24 05:28:46 +00:00
beckerinj
405dacce1c sanitize container logs for any script tags 2023-05-13 02:23:05 -04:00
beckerinj
9acd45aa93 fix log whitespace non preservation issue 2023-05-13 02:14:14 -04:00
beckerinj
c889c2cc03 clean up log component imports 2023-05-13 01:49:06 -04:00
mbecker20
7ac91ef416 view images on server 2023-05-12 07:22:56 +00:00
mbecker20
8e28669aa1 potentially fix deployment update getting crossed with another deployment 2023-05-09 21:17:10 +00:00
mbecker20
6cdb91f8b8 more readable container state in header 2023-05-04 01:12:55 +00:00
mbecker20
e892474713 modify create deployment initializer 2023-05-03 21:03:57 +00:00
mbecker20
abdae98816 core handle term signal 2023-05-03 20:02:37 +00:00
mbecker20
ab4fe49f33 deployment / build config reset 2023-05-03 19:31:59 +00:00
mbecker20
1ace35103b fix ansi-to-html install 2023-05-03 07:21:38 +00:00
beckerinj
dbee729eee show ansi colors in the logs correctly 2023-05-03 03:13:42 -04:00
mbecker20
792576ce59 add auto redeploy user 2023-05-02 17:21:25 +00:00
mbecker20
a07624e9b9 0.3.4 fix docker stop --signal on older docker versions 2023-05-01 21:03:25 +00:00
mbecker20
bb8054af8a log version first 2023-05-01 08:43:00 +00:00
mbecker20
7738f3e066 core logs version on startup 2023-05-01 08:34:20 +00:00
mbecker20
5dee16a100 0.3.3 add default term signal and timeout to deployment 2023-05-01 08:28:12 +00:00
mbecker20
35f3bcdf2f update core version 2023-05-01 03:05:51 +00:00
mbecker20
130ca8e1f1 bump versions to 0.3.2 2023-05-01 01:53:58 +00:00
mbecker20
ced4c21688 update monitor client to 0.3.1 2023-05-01 01:41:23 +00:00
mbecker20
6ec7078024 custom termination signals 2023-04-30 06:52:27 +00:00
mbecker20
b28d8f2506 update frontend types 2023-04-30 03:26:01 +00:00
mbecker20
c88a9291a0 support auto redeploy and custom stop signals 2023-04-30 00:10:59 +00:00
mbecker20
1e82d19306 build summary defaults to time view 2023-04-21 16:34:03 +00:00
mbecker20
dd87e50cb2 build stats summary 2023-04-21 08:52:17 +00:00
mbecker20
4c8f96a30f build stats card 2023-04-21 08:08:15 +00:00
mbecker20
c4f45e05f1 finish build stats api 2023-04-21 08:08:01 +00:00
mbecker20
6aa382c7c1 finish build stats api 2023-04-20 16:34:14 +00:00
mbecker20
ccb9f059e6 get build stats api 2023-04-20 07:34:49 +00:00
mbecker20
1cdcea0771 start on route to get daily build stats (time, count) 2023-04-19 07:02:21 +00:00
mbecker20
88dda0de80 update rename deployment to check whether deployment has repo attached, and if so, reclone it to account for name change. 2023-04-19 06:44:58 +00:00
mbecker20
30ed99e2b0 publish monitor client 0.3.1 with Readme 2023-04-18 07:56:30 +00:00
mbecker20
e5953b7541 monitor client readme 2023-04-18 07:55:05 +00:00
mbecker20
1f9d01c59f new home servers png 2023-04-18 06:31:23 +00:00
mbecker20
cc5210a3d8 fix server children add new button 2023-04-18 06:17:42 +00:00
mbecker20
26559e2d3b delete builds screenshots 2023-04-18 03:20:20 +00:00
mbecker20
7eeddb300f add link to screenshots docsite 2023-04-18 02:59:37 +00:00
mbecker20
1e01bae16b add screenshots to monitor readme 2023-04-18 02:47:46 +00:00
mbecker20
87c03924e5 remove second universal search 2023-04-18 02:46:46 +00:00
mbecker20
f0998b1d43 add universal search screenshot 2023-04-18 02:42:21 +00:00
mbecker20
1995a04244 add screenshots 2023-04-18 02:38:07 +00:00
mbecker20
420fe6bcd5 add build time to version selector 2023-04-17 08:26:34 +00:00
mbecker20
d4e26c0553 fix docker repo reference 2023-04-16 19:33:28 +00:00
mbecker20
5f5e7cb45e add note about oauth 2023-04-16 07:44:34 +00:00
beckerinj
8aa0304738 core setup doc 2023-04-16 03:32:06 -04:00
beckerinj
8ec98c33a4 first user is auto enabled and made admin 2023-04-16 03:23:30 -04:00
beckerinj
2667182ca3 update core example config 2023-04-16 02:49:00 -04:00
mbecker20
1cd0018b93 0.3.0 check whether pre_build / on_clone / on_pull are non empty before running 2023-04-14 22:58:10 +00:00
beckerinj
359789ee29 update aws sdk version 2023-04-14 12:52:52 -04:00
mbecker20
e79c860c0f make update hover 2023-04-14 15:06:05 +00:00
mbecker20
765f53f30e types doc 2023-04-14 06:14:33 +00:00
beckerinj
3c3c21d7f5 move header to top. redesign build 2023-04-14 01:57:10 -04:00
mbecker20
eb700cb500 improve search functionality 2023-04-14 03:53:21 +00:00
beckerinj
b3b723a717 build show unknown if builds[id] cannot be found 2023-04-13 11:19:51 -04:00
beckerinj
555c230d2e update menu stays open through updates 2023-04-13 03:07:31 -04:00
beckerinj
adf4b97aef lots of api docs 2023-04-13 02:23:40 -04:00
beckerinj
32c38d796b docs 2023-04-12 18:19:13 -04:00
beckerinj
c8829e15ed get most of servers api docs 2023-04-10 17:58:52 -04:00
beckerinj
453df417d0 finish deployment api doc 2023-04-10 04:39:47 -04:00
mbecker20
02a7741a9c don't show group editing ui if user doesn't have permissions 2023-04-09 22:55:30 +00:00
mbecker20
96fc5b0ca8 group page / edit work with non admin users 2023-04-09 22:38:36 +00:00
mbecker20
b13e624a66 support manage user permissions on groups 2023-04-09 19:24:07 +00:00
beckerinj
6a8f66f272 work on deployment api docs 2023-04-09 02:15:08 -04:00
mbecker20
0c638a08fd fix problems with build config page breaking 2023-04-07 20:15:21 +00:00
mbecker20
b07f8af8e5 deployment extra args fix as well 2023-04-07 19:50:16 +00:00
mbecker20
3bbb2a985f extra args frontend needs to account for when they don't exist 2023-04-07 19:50:06 +00:00
beckerinj
afdf71c545 work on api docs 2023-04-07 11:12:44 -04:00
beckerinj
8de8d2df9a work on API docs 2023-04-06 01:03:01 -04:00
mbecker20
1dffdbddc2 sort by ts increasing 2023-04-05 18:40:12 +00:00
mbecker20
11fff633b0 don't use $mod in stats query 2023-04-05 18:17:56 +00:00
mbecker20
61bc44d1f4 add hover class to home tree build 2023-04-04 21:18:13 +00:00
beckerinj
e8fabb8cfa Update index.mdx 2023-04-03 16:40:15 -04:00
mbecker20
7a50885847 exit group view with Escape in addition to ArrowLeft 2023-04-03 16:35:44 +00:00
beckerinj
6239da45f4 delete old docs and provide link to docsite 2023-04-03 11:28:50 -04:00
mbecker20
af597eb3c7 clean up builds 2023-04-03 05:55:37 +00:00
mbecker20
d66cda068c connect up delete group 2023-04-03 05:52:58 +00:00
beckerinj
91fcd07c1c make group behavior more sensible frontend 2023-04-03 01:50:45 -04:00
mbecker20
85aa470da1 publish docs 2023-04-03 02:48:00 +00:00
mbecker20
6f0d5f37a5 update home page description for sso 2023-04-02 22:30:37 +00:00
beckerinj
1b4d604404 deploymentBranch 2023-04-02 18:22:04 -04:00
beckerinj
a7f6cbe0b9 small docsite fixes 2023-04-02 12:44:35 -04:00
beckerinj
9cf28bf123 improve docs 2023-04-02 04:15:06 -04:00
beckerinj
c92e04294a monitor docs site working 2023-04-02 04:03:01 -04:00
beckerinj
36f059b455 docsite 2023-04-02 01:40:56 -04:00
mbecker20
4aac301852 0.2.14 only try to merge files, filter nested directories 2023-04-01 19:35:04 +00:00
mbecker20
b375708bbd 0.2.13 support config directories 2023-04-01 19:02:43 +00:00
mbecker20
10b6a9482b update aws sdk version and implement merge_config_files 2023-04-01 07:06:17 +00:00
193 changed files with 14767 additions and 1869 deletions

689
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[package]
name = "monitor_cli"
version = "0.2.12"
version = "0.3.4"
edition = "2021"
authors = ["MoghTech"]
description = "monitor cli | tools to setup monitor system"

View File

@@ -10,19 +10,16 @@ port = 9000
# daily utc offset in hours to send daily update. eg 8:00 eastern time is 13:00 UTC, so offset should be 13. default of 0 runs at UTC midnight.
daily_offset_hours = 13
# number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
keep_stats_for_days = 120
# secret used to generate the jwt. should be some randomly generated hash.
jwt_secret = "your_jwt_secret"
# can be 1-hr, 12-hr, 1-day, 3-day, 1-wk, 2-wk, 30-day
jwt_valid_for = "1-wk"
# webhook url given by slack app
# webhook url given by slack app that monitor will send alerts and a daily update to
slack_url = "your_slack_app_webhook_url"
# token that has to be given to github during webhook config as the secret
# token that has to be given to github during repo webhook config as the secret
github_webhook_secret = "your_random_webhook_secret"
# optional. an alternate base url that is used to receive github webhook requests. if not provided, will use 'host' address as base
@@ -31,30 +28,20 @@ github_webhook_base_url = "https://monitor-github-webhook.mogh.tech"
# token used to authenticate core requests to periphery
passkey = "your_random_passkey"
# can be 30-sec, 1-min, 2-min, 5-min
# controls the granularity of the system stats collection by monitor core
# can be 15-sec, 30-sec, 1-min, 2-min, 5-min
monitoring_interval = "1-min"
# number of days to keep stats around, or 0 to disable pruning. stats older than this number of days are deleted daily
keep_stats_for_days = 14
# these will be used by the GUI to attach to builds. New build docker orgs will default to first org (or none if empty).
# when attached to build, image will be pushed to repo under the specified organization
docker_organizations = ["your_docker_org1", "your_docker_org_2"]
# allow or deny user login with username / password
local_auth = true
# these will be given in the GUI to attach to builds. New build docker orgs will default to first org (or none if empty).
docker_organizations = ["your_docker_org1", "your_docker_org_2"]
[aws]
access_key_id = "your_aws_key_id"
secret_access_key = "your_aws_secret_key"
default_region = "us-east-1"
default_ami_id = "your_periphery_ami"
default_key_pair_name = "your_default_key_pair_name"
default_instance_type = "m5.2xlarge"
default_volume_gb = 8
default_subnet_id = "your_default_subnet_id"
default_security_group_ids = ["sg_id_1", "sg_id_2"]
default_assign_public_ip = false
[aws.available_ami_accounts]
your_periphery_ami = { name = "default ami", github = ["github_username"], docker = ["docker_username"] }
[github_oauth]
enabled = true
id = "your_github_client_id"
@@ -68,4 +55,19 @@ secret = "your_google_client_secret"
[mongo]
uri = "your_mongo_uri"
app_name = "monitor_core"
db_name = "monitor"
db_name = "monitor" # this is the name of the mongo database that monitor will create its collections in.
[aws]
access_key_id = "your_aws_key_id"
secret_access_key = "your_aws_secret_key"
default_region = "us-east-1"
default_ami_name = "your_ami_name" # must be defined below in [aws.available_ami_accounts]
default_instance_type = "m5.2xlarge"
default_volume_gb = 8
default_subnet_id = "your_default_subnet_id"
default_security_group_ids = ["sg_id_1", "sg_id_2"]
default_key_pair_name = "your_default_key_pair_name"
default_assign_public_ip = false
[aws.available_ami_accounts]
your_ami_name = { ami_id = "ami-1234567890", github = ["github_username"], docker = ["docker_username"] }

View File

@@ -1,6 +1,6 @@
[package]
name = "core"
version = "0.2.12"
version = "0.3.4"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -11,17 +11,16 @@ types = { package = "monitor_types", path = "../lib/types" }
db = { package = "db_client", path = "../lib/db_client" }
periphery = { package = "periphery_client", path = "../lib/periphery_client" }
axum_oauth2 = { path = "../lib/axum_oauth2" }
tokio = { version = "1.26", features = ["full"] }
tokio-tungstenite = { version = "0.18", features=["native-tls"] }
tokio-util = "0.7"
tokio = { version = "1.28", features = ["full"] }
tokio-tungstenite = { version = "0.19", features=["native-tls"] }
tokio-util = { version = "0.7"}
axum = { version = "0.6", features = ["ws", "json"] }
axum-extra = { version = "0.5.0", features = ["spa"] }
tower = { version = "0.4", features = ["full"] }
tower-http = { version = "0.4.0", features = ["cors"] }
tower = { version = "0.4", features = ["timeout"] }
tower-http = { version = "0.4", features = ["fs", "cors"] }
slack = { package = "slack_client_rs", version = "0.0.8" }
mungos = "0.3.14"
futures-util = "0.3"
mungos = "0.3.19"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0"
dotenv = "0.15"
envy = "0.4"
@@ -31,9 +30,10 @@ jwt = "0.16"
hmac = "0.12"
sha2 = "0.10"
async_timing_util = "0.1.14"
futures-util = "0.3"
diff-struct = "0.5"
typeshare = "1.0.0"
typeshare = "1.0.1"
hex = "0.4"
aws-config = "0.54"
aws-sdk-ec2 = "0.24"
aws-config = "0.55.2"
aws-sdk-ec2 = "0.27.0"
merge_config_files = "0.1.3"
termination_signal = "0.1.2"

View File

@@ -1,20 +1,20 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use aws_sdk_ec2::Client;
use diff::Diff;
use futures_util::future::join_all;
use helpers::{all_logs_success, to_monitor_name};
use mungos::{doc, to_bson};
use mungos::mongodb::bson::{doc, to_bson};
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
AwsBuilderBuildConfig, Build, Log, Operation, PermissionLevel, Update, UpdateStatus,
UpdateTarget, Version,
monitor_timestamp, traits::Permissioned, AwsBuilderBuildConfig, Build, DockerContainerState,
Log, Operation, PermissionLevel, Update, UpdateStatus, UpdateTarget, Version,
};
use crate::{
auth::RequestUser,
cloud::aws::{
self, create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance,
create_ec2_client, create_instance_with_ami, terminate_ec2_instance, Ec2Instance,
},
helpers::empty_or_only_spaces,
state::State,
@@ -41,13 +41,6 @@ impl State {
}
}
pub async fn build_busy(&self, id: &str) -> bool {
match self.build_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
}
}
pub async fn create_build(&self, name: &str, user: &RequestUser) -> anyhow::Result<Build> {
if !user.is_admin && !user.create_build_permissions {
return Err(anyhow!("user does not have permission to create builds"));
@@ -115,7 +108,7 @@ impl State {
}
pub async fn delete_build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Build> {
if self.build_busy(build_id).await {
if self.build_action_states.busy(build_id).await {
return Err(anyhow!("build busy"));
}
let build = self
@@ -145,21 +138,23 @@ impl State {
new_build: Build,
user: &RequestUser,
) -> anyhow::Result<Build> {
if self.build_busy(&new_build.id).await {
if self.build_action_states.busy(&new_build.id).await {
return Err(anyhow!("build busy"));
}
let id = new_build.id.clone();
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(id.clone()).or_default();
entry.updating = true;
}
self.build_action_states
.update_entry(id.clone(), |entry| {
entry.updating = true;
})
.await;
let res = self.update_build_inner(new_build, user).await;
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(id).or_default();
entry.updating = false;
}
self.build_action_states
.update_entry(id.clone(), |entry| {
entry.updating = false;
})
.await;
res
}
@@ -250,20 +245,20 @@ impl State {
}
pub async fn build(&self, build_id: &str, user: &RequestUser) -> anyhow::Result<Update> {
if self.build_busy(build_id).await {
if self.build_action_states.busy(build_id).await {
return Err(anyhow!("build busy"));
}
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(build_id.to_string()).or_default();
entry.building = true;
}
self.build_action_states
.update_entry(build_id.to_string(), |entry| {
entry.building = true;
})
.await;
let res = self.build_inner(build_id, user).await;
{
let mut lock = self.build_action_states.lock().await;
let entry = lock.entry(build_id.to_string()).or_default();
entry.building = false;
}
self.build_action_states
.update_entry(build_id.to_string(), |entry| {
entry.building = false;
})
.await;
res
}
@@ -414,6 +409,8 @@ impl State {
.await;
}
self.handle_post_build_redeploy(build_id, &mut update).await;
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
@@ -423,10 +420,89 @@ impl State {
Ok(update)
}
async fn handle_post_build_redeploy(&self, build_id: &str, update: &mut Update) {
let redeploy_deployments = self
.db
.deployments
.get_some(
doc! { "build_id": build_id, "redeploy_on_build": true },
None,
)
.await;
if let Ok(deployments) = redeploy_deployments {
let futures = deployments.into_iter().map(|d| async move {
let request_user = RequestUser {
id: "auto redeploy".to_string(),
is_admin: true,
..Default::default()
};
let state = self
.get_deployment_with_container_state(&request_user, &d.id)
.await
.map(|r| r.state)
.unwrap_or_default();
if state == DockerContainerState::Running {
Some((
d.id.clone(),
self.deploy_container(
&d.id,
&RequestUser {
id: "auto redeploy".to_string(),
is_admin: true,
..Default::default()
},
None,
None,
)
.await,
))
} else {
None
}
});
let redeploy_results = join_all(futures).await;
let mut redeploys = Vec::<String>::new();
let mut redeploy_failures = Vec::<String>::new();
for res in redeploy_results {
if res.is_none() {
continue;
}
let (id, res) = res.unwrap();
match res {
Ok(_) => redeploys.push(id),
Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
}
}
if redeploys.len() > 0 {
update.logs.push(Log::simple(
"redeploy",
format!("redeployed deployments: {}", redeploys.join(", ")),
))
}
if redeploy_failures.len() > 0 {
update.logs.push(Log::simple(
"redeploy failures",
redeploy_failures.join("\n"),
))
}
} else if let Err(e) = redeploy_deployments {
update.logs.push(Log::simple(
"redeploys failed",
format!("failed to get deployments to redeploy: {e:#?}"),
))
}
}
async fn create_ec2_instance_for_build(
&self,
build: &Build,
) -> anyhow::Result<(Ec2Instance, Option<aws::Client>, Vec<Log>)> {
) -> anyhow::Result<(Ec2Instance, Option<Client>, Vec<Log>)> {
if build.aws_config.is_none() {
return Err(anyhow!("build has no aws_config attached"));
}
@@ -527,7 +603,7 @@ impl State {
async fn terminate_ec2_instance(
&self,
aws_client: aws::Client,
aws_client: Client,
server: &Ec2Instance,
update: &mut Update,
) {

238
core/src/actions/command.rs Normal file
View File

@@ -0,0 +1,238 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::all_logs_success;
use types::{
monitor_timestamp, traits::Permissioned, Log, Operation, PeripheryCommand,
PeripheryCommandBuilder, PermissionLevel, Update, UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
impl State {
pub async fn get_command_check_permissions(
&self,
command_id: &str,
user: &RequestUser,
permission_level: PermissionLevel,
) -> anyhow::Result<PeripheryCommand> {
let command = self.db.get_command(command_id).await?;
let permissions = command.get_user_permissions(&user.id);
if user.is_admin || permissions >= permission_level {
Ok(command)
} else {
Err(anyhow!(
"user does not have required permissions on this command"
))
}
}
pub async fn create_command(
&self,
name: &str,
server_id: String,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let command = PeripheryCommandBuilder::default()
.name(name.to_string())
.server_id(server_id)
.build()
.context("failed to build command")?;
let command_id = self
.db
.commands
.create_one(command)
.await
.context("failed at adding command to db")?;
let command = self.db.get_command(&command_id).await?;
let update = Update {
target: UpdateTarget::Command(command_id),
operation: Operation::CreateCommand,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(command)
}
pub async fn create_full_command(
&self,
mut command: PeripheryCommand,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
command.id = self
.create_command(&command.name, command.server_id.clone(), user)
.await?
.id;
let command = self.update_command(command, user).await?;
Ok(command)
}
pub async fn copy_command(
&self,
target_id: &str,
new_name: String,
new_server_id: String,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
let mut command = self
.get_command_check_permissions(target_id, user, PermissionLevel::Update)
.await?;
command.name = new_name;
command.server_id = new_server_id;
let command = self.create_full_command(command, user).await?;
Ok(command)
}
pub async fn delete_command(
&self,
command_id: &str,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
if self.command_action_states.busy(command_id).await {
return Err(anyhow!("command busy"));
}
let command = self
.get_command_check_permissions(command_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
self.db.commands.delete_one(command_id).await?;
let update = Update {
target: UpdateTarget::Command(command_id.to_string()),
operation: Operation::DeleteCommand,
start_ts,
end_ts: Some(monitor_timestamp()),
operator: user.id.clone(),
logs: vec![Log::simple(
"delete command",
format!("deleted command {}", command.name),
)],
success: true,
..Default::default()
};
self.add_update(update).await?;
Ok(command)
}
pub async fn update_command(
&self,
mut new_command: PeripheryCommand,
user: &RequestUser,
) -> anyhow::Result<PeripheryCommand> {
let current_command = self
.get_command_check_permissions(&new_command.id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
// none of these should be changed through this method
new_command.permissions = current_command.permissions.clone();
new_command.server_id = current_command.server_id.clone();
new_command.created_at = current_command.created_at.clone();
new_command.updated_at = start_ts.clone();
self.db
.commands
.update_one(
&new_command.id,
mungos::Update::Regular(new_command.clone()),
)
.await
.context("failed at update one command")?;
let diff = current_command.diff(&new_command);
let update = Update {
operation: Operation::UpdateCommand,
target: UpdateTarget::Command(new_command.id.clone()),
start_ts,
status: UpdateStatus::Complete,
logs: vec![Log::simple(
"command update",
serde_json::to_string_pretty(&diff).unwrap(),
)],
operator: user.id.clone(),
success: true,
..Default::default()
};
self.add_update(update.clone()).await?;
self.update_update(update).await?;
Ok(new_command)
}
pub async fn run_command(
&self,
command_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.command_action_states.busy(command_id).await {
return Err(anyhow!("command busy"));
}
self.command_action_states
.update_entry(command_id.to_string(), |entry| {
entry.running = true;
})
.await;
let res = self.run_command_inner(command_id, user).await;
self.command_action_states
.update_entry(command_id.to_string(), |entry| {
entry.running = false;
})
.await;
res
}
async fn run_command_inner(
&self,
command_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let command = self
.get_command_check_permissions(command_id, user, PermissionLevel::Execute)
.await?;
if command.command.path.is_empty() || command.command.command.is_empty() {
return Err(anyhow!("command or path is empty, aborting"));
}
let server = self.db.get_server(&command.server_id).await?;
let mut update = Update {
target: UpdateTarget::Command(command_id.to_string()),
operation: Operation::RunCommand,
start_ts,
status: UpdateStatus::InProgress,
success: true,
operator: user.id.clone(),
..Default::default()
};
update.id = self.add_update(update.clone()).await?;
match self.periphery.run_command(&server, &command.command).await {
Ok(log) => {
update.logs.push(log);
}
Err(e) => {
update
.logs
.push(Log::error("clone repo", format!("{e:#?}")));
}
}
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
self.update_update(update.clone()).await?;
Ok(update)
}
}

View File

@@ -1,12 +1,11 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use helpers::{all_logs_success, to_monitor_name};
use mungos::doc;
use mungos::mongodb::bson::doc;
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Deployment, DeploymentWithContainerState, DockerContainerState, Log, Operation,
PermissionLevel, ServerStatus, ServerWithStatus, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, traits::Permissioned, Deployment, DeploymentBuilder,
DeploymentWithContainerState, DockerContainerState, Log, Operation, PermissionLevel,
ServerStatus, ServerWithStatus, TerminationSignal, Update, UpdateStatus, UpdateTarget,
};
use crate::{
@@ -33,13 +32,6 @@ impl State {
}
}
pub async fn deployment_busy(&self, id: &str) -> bool {
match self.deployment_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
}
}
pub async fn create_deployment(
&self,
name: &str,
@@ -49,16 +41,18 @@ impl State {
self.get_server_check_permissions(&server_id, user, PermissionLevel::Update)
.await?;
let start_ts = monitor_timestamp();
let deployment = Deployment {
name: to_monitor_name(name),
server_id,
permissions: [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect(),
created_at: start_ts.clone(),
updated_at: start_ts.clone(),
..Default::default()
};
let mut deployment = DeploymentBuilder::default()
.name(to_monitor_name(name))
.server_id(server_id)
.build()
.context("failed to build deployment")?;
deployment.permissions = [(user.id.clone(), PermissionLevel::Update)]
.into_iter()
.collect();
deployment.created_at = start_ts.clone();
deployment.updated_at = start_ts.clone();
let deployment_id = self
.db
.deployments
@@ -112,8 +106,10 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Deployment> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
let deployment = self
@@ -123,7 +119,7 @@ impl State {
let server = self.db.get_server(&deployment.server_id).await?;
let log = match self
.periphery
.container_remove(&server, &deployment.name)
.container_remove(&server, &deployment.name, stop_signal, stop_time)
.await
{
Ok(log) => log,
@@ -164,21 +160,25 @@ impl State {
new_deployment: Deployment,
user: &RequestUser,
) -> anyhow::Result<Deployment> {
if self.deployment_busy(&new_deployment.id).await {
if self.deployment_action_states.busy(&new_deployment.id).await {
return Err(anyhow!("deployment busy"));
}
let id = new_deployment.id.clone();
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(id.clone()).or_default();
entry.updating = true;
}
self.deployment_action_states
.update_entry(id.clone(), |entry| {
entry.updating = true;
})
.await;
let res = self.update_deployment_inner(new_deployment, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(id).or_default();
entry.updating = false;
}
self.deployment_action_states
.update_entry(id.clone(), |entry| {
entry.updating = false;
})
.await;
res
}
@@ -282,22 +282,25 @@ impl State {
new_name: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.deployment_busy(&deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.renaming = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.renaming = true;
})
.await;
let res = self
.rename_deployment_inner(deployment_id, new_name, user)
.await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.renaming = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.renaming = false;
})
.await;
res
}
@@ -367,7 +370,9 @@ impl State {
self.update_update(update).await?;
return Err(deployment_state.err().unwrap());
}
let DeploymentWithContainerState { state, .. } = deployment_state.unwrap();
let DeploymentWithContainerState {
deployment, state, ..
} = deployment_state.unwrap();
if state != DockerContainerState::NotDeployed {
let log = self
.periphery
@@ -406,7 +411,6 @@ impl State {
)
.await
.context("failed to update deployment name on mongo");
if let Err(e) = res {
update
.logs
@@ -418,6 +422,20 @@ impl State {
))
}
if deployment.repo.is_some() {
let res = self.reclone_deployment(deployment_id, user, false).await;
if let Err(e) = res {
update
.logs
.push(Log::error("reclone repo", format!("{e:?}")));
} else {
update.logs.push(Log::simple(
"reclone repo",
"deployment repo cloned with new name".to_string(),
));
}
}
update.end_ts = monitor_timestamp().into();
update.status = UpdateStatus::Complete;
update.success = all_logs_success(&update.logs);
@@ -431,21 +449,25 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
check_deployment_busy: bool,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if check_deployment_busy && self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.recloning = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.recloning = true;
})
.await;
let res = self.reclone_deployment_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.recloning = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.recloning = false;
})
.await;
res
}
@@ -469,19 +491,18 @@ impl State {
};
update.id = self.add_update(update.clone()).await?;
update.success = match self.periphery.clone_repo(&server, &deployment).await {
match self.periphery.clone_repo(&server, &deployment).await {
Ok(clone_logs) => {
update.logs.extend(clone_logs);
true
}
Err(e) => {
update
.logs
.push(Log::error("clone repo", format!("{e:#?}")));
false
}
};
update.success = all_logs_success(&update.logs);
update.status = UpdateStatus::Complete;
update.end_ts = Some(monitor_timestamp());
@@ -494,21 +515,27 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.deploying = true;
}
let res = self.deploy_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.deploying = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.deploying = true;
})
.await;
let res = self
.deploy_container_inner(deployment_id, user, stop_signal, stop_time)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.deploying = false;
})
.await;
res
}
@@ -516,6 +543,8 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let mut deployment = self
@@ -553,7 +582,14 @@ impl State {
update.id = self.add_update(update.clone()).await?;
let deploy_log = match self.periphery.deploy(&server, &deployment).await {
let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();
let deploy_log = match self
.periphery
.deploy(&server, &deployment, stop_signal, stop_time)
.await
{
Ok(log) => log,
Err(e) => Log::error("deploy container", format!("{e:#?}")),
};
@@ -573,20 +609,22 @@ impl State {
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.starting = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.starting = true;
})
.await;
let res = self.start_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.starting = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.starting = false;
})
.await;
res
}
@@ -642,21 +680,27 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.stopping = true;
}
let res = self.stop_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.stopping = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.stopping = true;
})
.await;
let res = self
.stop_container_inner(deployment_id, user, stop_signal, stop_time)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.stopping = false;
})
.await;
res
}
@@ -664,6 +708,8 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let deployment = self
@@ -681,9 +727,12 @@ impl State {
};
update.id = self.add_update(update.clone()).await?;
let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();
let log = self
.periphery
.container_stop(&server, &deployment.name)
.container_stop(&server, &deployment.name, stop_signal, stop_time)
.await;
update.success = match log {
@@ -712,21 +761,27 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.removing = true;
}
let res = self.remove_container_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.removing = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.removing = true;
})
.await;
let res = self
.remove_container_inner(deployment_id, user, stop_signal, stop_time)
.await;
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.removing = false;
})
.await;
res
}
@@ -734,6 +789,8 @@ impl State {
&self,
deployment_id: &str,
user: &RequestUser,
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
) -> anyhow::Result<Update> {
let start_ts = monitor_timestamp();
let deployment = self
@@ -751,9 +808,12 @@ impl State {
};
update.id = self.add_update(update.clone()).await?;
let stop_signal = stop_signal.unwrap_or(deployment.termination_signal).into();
let stop_time = stop_time.unwrap_or(deployment.termination_timeout).into();
let log = self
.periphery
.container_remove(&server, &deployment.name)
.container_remove(&server, &deployment.name, stop_signal, stop_time)
.await;
update.success = match log {
@@ -783,20 +843,22 @@ impl State {
deployment_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.deployment_busy(deployment_id).await {
if self.deployment_action_states.busy(deployment_id).await {
return Err(anyhow!("deployment busy"));
}
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.pulling = true;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.pulling = true;
})
.await;
let res = self.pull_deployment_repo_inner(deployment_id, user).await;
{
let mut lock = self.deployment_action_states.lock().await;
let entry = lock.entry(deployment_id.to_string()).or_default();
entry.pulling = false;
}
self.deployment_action_states
.update_entry(deployment_id.to_string(), |entry| {
entry.pulling = false;
})
.await;
res
}

View File

@@ -11,17 +11,17 @@ use crate::{auth::RequestUser, state::State};
impl State {
pub async fn get_group_check_permissions(
&self,
deployment_id: &str,
group_id: &str,
user: &RequestUser,
permission_level: PermissionLevel,
) -> anyhow::Result<Group> {
let group = self.db.get_group(deployment_id).await?;
let group = self.db.get_group(group_id).await?;
let permissions = group.get_user_permissions(&user.id);
if user.is_admin || permissions >= permission_level {
Ok(group)
} else {
Err(anyhow!(
"user does not have required permissions on this deployment"
"user does not have required permissions on this group"
))
}
}

View File

@@ -4,6 +4,7 @@ use types::Update;
use crate::state::State;
mod build;
mod command;
mod deployment;
mod group;
mod procedure;

View File

@@ -209,7 +209,7 @@ impl State {
}
StopContainer => {
let update = self
.stop_container(&target_id, user)
.stop_container(&target_id, user, Option::None, Option::None)
.await
.context(format!(
"failed at stop container for deployment (id: {target_id})"
@@ -218,7 +218,7 @@ impl State {
}
RemoveContainer => {
let update = self
.remove_container(&target_id, user)
.remove_container(&target_id, user, Option::None, Option::None)
.await
.context(format!(
"failed at remove container for deployment (id: {target_id})"
@@ -227,7 +227,7 @@ impl State {
}
DeployContainer => {
let update = self
.deploy_container(&target_id, user)
.deploy_container(&target_id, user, Option::None, Option::None)
.await
.context(format!(
"failed at deploy container for deployment (id: {target_id})"
@@ -236,14 +236,18 @@ impl State {
}
RecloneDeployment => {
let update = self
.reclone_deployment(&target_id, user)
.reclone_deployment(&target_id, user, true)
.await
.context(format!("failed at reclone deployment (id: {target_id})"))?;
updates.push(update);
}
PullDeployment => {
// implement this one
// let update = self.pull
let update = self
.pull_deployment_repo(&target_id, user)
.await
.context(format!("failed at pull deployment (id: {target_id})"))?;
updates.push(update);
}
// build
BuildBuild => {

View File

@@ -1,11 +1,10 @@
use anyhow::{anyhow, Context};
use diff::Diff;
use futures_util::future::join_all;
use mungos::doc;
use mungos::mongodb::bson::doc;
use types::{
monitor_timestamp,
traits::{Busy, Permissioned},
Log, Operation, PermissionLevel, Server, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, traits::Permissioned, Log, Operation, PermissionLevel, Server, Update,
UpdateStatus, UpdateTarget,
};
use crate::{auth::RequestUser, state::State};
@@ -28,13 +27,6 @@ impl State {
}
}
pub async fn server_busy(&self, id: &str) -> bool {
match self.server_action_states.lock().await.get(id) {
Some(a) => a.busy(),
None => false,
}
}
pub async fn create_server(
&self,
name: &str,
@@ -96,7 +88,7 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Server> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
let server = self
@@ -121,14 +113,14 @@ impl State {
.get_some(doc! { "server_id": server_id }, None)
.await?
.into_iter()
.map(|d| async move { self.delete_deployment(&d.id, user).await });
.map(|d| async move { self.delete_deployment(&d.id, user, None, None).await });
let delete_builds = self
.db
.builds
.get_some(doc! { "server_id": server_id }, None)
.await?
.into_iter()
.map(|d| async move { self.delete_deployment(&d.id, user).await });
.map(|d| async move { self.delete_deployment(&d.id, user, None, None).await });
let update_groups = self
.db
.groups
@@ -164,7 +156,7 @@ impl State {
mut new_server: Server,
user: &RequestUser,
) -> anyhow::Result<Server> {
if self.server_busy(&new_server.id).await {
if self.server_action_states.busy(&new_server.id).await {
return Err(anyhow!("server busy"));
}
let current_server = self
@@ -208,20 +200,22 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_networks = true;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_networks = true;
})
.await;
let res = self.prune_networks_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_networks = false;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_networks = false;
})
.await;
res
}
@@ -269,20 +263,22 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_images = true;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_images = true;
})
.await;
let res = self.prune_images_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_images = false;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_images = false;
})
.await;
res
}
@@ -331,20 +327,22 @@ impl State {
server_id: &str,
user: &RequestUser,
) -> anyhow::Result<Update> {
if self.server_busy(server_id).await {
if self.server_action_states.busy(server_id).await {
return Err(anyhow!("server busy"));
}
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_containers = true;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_containers = true;
})
.await;
let res = self.prune_containers_inner(server_id, user).await;
{
let mut lock = self.server_action_states.lock().await;
let entry = lock.entry(server_id.to_string()).or_default();
entry.pruning_containers = false;
}
self.server_action_states
.update_entry(server_id.to_string(), |entry| {
entry.pruning_containers = false;
})
.await;
res
}

View File

@@ -1,18 +1,27 @@
use std::{cmp::Ordering, collections::HashMap};
use anyhow::Context;
use async_timing_util::unix_timestamp_ms;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
};
use futures_util::TryStreamExt;
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, FindOptions, Serialize};
use mungos::mongodb::{
bson::{doc, Document},
options::FindOptions,
};
use serde::{Deserialize, Serialize};
use types::{
traits::Permissioned, AwsBuilderConfig, Build, BuildActionState, BuildVersionsReponse,
Operation, PermissionLevel, UpdateStatus,
monitor_ts_from_unix, traits::Permissioned, unix_from_monitor_ts, AwsBuilderConfig, Build,
BuildActionState, BuildVersionsReponse, Operation, PermissionLevel, UpdateStatus,
};
use typeshare::typeshare;
const NUM_VERSIONS_PER_PAGE: u64 = 10;
const ONE_DAY_MS: i64 = 86400000;
use crate::{
auth::{RequestUser, RequestUserExtension},
@@ -49,12 +58,35 @@ pub struct BuildVersionsQuery {
patch: Option<i32>,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildStatsQuery {
#[serde(default)]
page: u32,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct BuildStatsResponse {
pub total_time: f64, // in hours
pub total_count: f64, // number of builds
pub days: Vec<BuildStatsDay>,
}
#[typeshare]
#[derive(Serialize, Deserialize, Default)]
pub struct BuildStatsDay {
pub time: f64,
pub count: f64,
pub ts: f64,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let build = state
@@ -68,7 +100,7 @@ pub fn router() -> Router {
.route(
"/list",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Query(query): Query<Document>| async move {
let builds = state
@@ -82,7 +114,7 @@ pub fn router() -> Router {
.route(
"/create",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<CreateBuildBody>| async move {
let build = state
@@ -96,7 +128,7 @@ pub fn router() -> Router {
.route(
"/create_full",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<Build>| async move {
let build = spawn_request_action(async move {
@@ -113,7 +145,7 @@ pub fn router() -> Router {
.route(
"/:id/copy",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }): Path<BuildId>,
Json(build): Json<CopyBuildBody>| async move {
@@ -131,7 +163,7 @@ pub fn router() -> Router {
.route(
"/:id/delete",
delete(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let build = spawn_request_action(async move {
@@ -148,7 +180,7 @@ pub fn router() -> Router {
.route(
"/update",
patch(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Json(build): Json<Build>| async move {
let build = spawn_request_action(async move {
@@ -165,7 +197,7 @@ pub fn router() -> Router {
.route(
"/:id/build",
post(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(build_id): Path<BuildId>| async move {
let update = spawn_request_action(async move {
@@ -182,7 +214,7 @@ pub fn router() -> Router {
.route(
"/:id/action_state",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }): Path<BuildId>| async move {
let action_state = state
@@ -196,7 +228,7 @@ pub fn router() -> Router {
.route(
"/:id/versions",
get(
|Extension(state): StateExtension,
|state: StateExtension,
Extension(user): RequestUserExtension,
Path(BuildId { id }),
Query(query): Query<BuildVersionsQuery>| async move {
@@ -210,7 +242,7 @@ pub fn router() -> Router {
)
.route(
"/aws_builder_defaults",
get(|Extension(state): StateExtension| async move {
get(|state: StateExtension| async move {
Json(AwsBuilderConfig {
access_key_id: String::new(),
secret_access_key: String::new(),
@@ -220,10 +252,17 @@ pub fn router() -> Router {
)
.route(
"/docker_organizations",
get(|Extension(state): StateExtension| async move {
get(|state: StateExtension| async move {
Json(state.config.docker_organizations.clone())
}),
)
.route(
"/stats",
get(|state: StateExtension, query: Query<BuildStatsQuery>| async move {
let stats = state.get_build_stats(query.page).await.map_err(handle_anyhow_error)?;
response!(Json(stats))
}),
)
}
impl State {
@@ -258,17 +297,11 @@ impl State {
) -> anyhow::Result<BuildActionState> {
self.get_build_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self
.build_action_states
.lock()
.await
.entry(id)
.or_default()
.clone();
let action_state = self.build_action_states.get_or_default(id).await;
Ok(action_state)
}
pub async fn get_build_versions(
async fn get_build_versions(
&self,
id: &str,
user: &RequestUser,
@@ -317,4 +350,86 @@ impl State {
.collect();
Ok(versions)
}
async fn get_build_stats(&self, page: u32) -> anyhow::Result<BuildStatsResponse> {
let curr_ts = unix_timestamp_ms() as i64;
let next_day = curr_ts - curr_ts % ONE_DAY_MS + ONE_DAY_MS;
let close_ts = next_day - page as i64 * 30 * ONE_DAY_MS;
let open_ts = close_ts - 30 * ONE_DAY_MS;
let mut build_updates = self
.db
.updates
.collection
.find(
doc! {
"start_ts": {
"$gte": monitor_ts_from_unix(open_ts)
.context("open_ts out of bounds")?,
"$lt": monitor_ts_from_unix(close_ts)
.context("close_ts out of bounds")?
},
"operation": Operation::BuildBuild.to_string(),
},
None,
)
.await?;
let mut days = HashMap::<i64, BuildStatsDay>::with_capacity(32);
let mut curr = open_ts;
while curr < close_ts {
let stats = BuildStatsDay {
ts: curr as f64,
..Default::default()
};
days.insert(curr, stats);
curr += ONE_DAY_MS;
}
while let Some(update) = build_updates.try_next().await? {
if let Some(end_ts) = update.end_ts {
let start_ts = unix_from_monitor_ts(&update.start_ts)
.context("failed to parse update start_ts")?;
let end_ts =
unix_from_monitor_ts(&end_ts).context("failed to parse update end_ts")?;
let day = start_ts - start_ts % ONE_DAY_MS;
let mut entry = days.entry(day).or_default();
entry.count += 1.0;
entry.time += ms_to_hour(end_ts - start_ts);
}
}
Ok(BuildStatsResponse::new(days.into_values().collect()))
}
}
impl BuildStatsResponse {
fn new(mut days: Vec<BuildStatsDay>) -> BuildStatsResponse {
days.sort_by(|a, b| {
if a.ts < b.ts {
Ordering::Less
} else {
Ordering::Greater
}
});
let mut total_time = 0.0;
let mut total_count = 0.0;
for day in &days {
total_time += day.time;
total_count += day.count;
}
BuildStatsResponse {
total_time,
total_count,
days,
}
}
}
const MS_TO_HOUR_DIVISOR: f64 = 1000.0 * 60.0 * 60.0;
fn ms_to_hour(duration: i64) -> f64 {
duration as f64 / MS_TO_HOUR_DIVISOR
}

220
core/src/api/command.rs Normal file
View File

@@ -0,0 +1,220 @@
use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, CommandActionState, PeripheryCommand, PermissionLevel};
use typeshare::typeshare;
use crate::{
api::spawn_request_action,
auth::{RequestUser, RequestUserExtension},
response,
state::{State, StateExtension},
};
#[derive(Serialize, Deserialize)]
pub struct CommandId {
id: String,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CreateCommandBody {
name: String,
server_id: String,
}
#[typeshare]
#[derive(Serialize, Deserialize)]
pub struct CopyCommandBody {
name: String,
server_id: String,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let command = state
.get_command_check_permissions(&id, &user, PermissionLevel::Read)
.await
.map_err(handle_anyhow_error)?;
response!(Json(command))
},
),
)
.route(
"/list",
get(
|state: StateExtension,
user: RequestUserExtension,
Query(query): Query<Document>| async move {
let commands = state
.list_commands(&user, query)
.await
.map_err(handle_anyhow_error)?;
response!(Json(commands))
},
),
)
.route(
"/create",
post(
|state: StateExtension,
user: RequestUserExtension,
Json(command): Json<CreateCommandBody>| async move {
let command = state
.create_command(&command.name, command.server_id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(command))
},
),
)
.route(
"/create_full",
post(
|state: StateExtension,
user: RequestUserExtension,
Json(command): Json<PeripheryCommand>| async move {
let command = spawn_request_action(async move {
state
.create_full_command(command, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(command))
},
),
)
.route(
"/:id/copy",
post(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id }),
Json(command): Json<CopyCommandBody>| async move {
let command = spawn_request_action(async move {
state
.copy_command(&id, command.name, command.server_id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(command))
},
),
)
.route(
"/:id/delete",
delete(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let build = spawn_request_action(async move {
state
.delete_command(&id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(build))
},
),
)
.route(
"/update",
patch(
|state: StateExtension,
user: RequestUserExtension,
Json(command): Json<PeripheryCommand>| async move {
let command = spawn_request_action(async move {
state
.update_command(command, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(command))
},
),
)
.route(
"/:id/action_state",
get(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let action_state = state
.get_command_action_states(id, &user)
.await
.map_err(handle_anyhow_error)?;
response!(Json(action_state))
},
),
)
.route(
"/:id/run",
post(
|state: StateExtension,
user: RequestUserExtension,
Path(CommandId { id })| async move {
let update = spawn_request_action(async move {
state
.run_command(&id, &user)
.await
.map_err(handle_anyhow_error)
})
.await??;
response!(Json(update))
},
),
)
}
impl State {
async fn list_commands(
&self,
user: &RequestUser,
query: impl Into<Option<Document>>,
) -> anyhow::Result<Vec<PeripheryCommand>> {
let commands: Vec<PeripheryCommand> = self
.db
.commands
.get_some(query, None)
.await
.context("failed at get all commands query")?
.into_iter()
.filter(|s| {
if user.is_admin {
true
} else {
let permissions = s.get_user_permissions(&user.id);
permissions != PermissionLevel::None
}
})
.collect();
Ok(commands)
}
async fn get_command_action_states(
&self,
id: String,
user: &RequestUser,
) -> anyhow::Result<CommandActionState> {
self.get_command_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self.command_action_states.get_or_default(id).await;
Ok(action_state)
}
}

View File

@@ -4,15 +4,19 @@ use anyhow::Context;
use axum::{
extract::{Path, Query},
routing::{delete, get, patch, post},
Extension, Json, Router,
Json, Router,
};
use futures_util::future::join_all;
use helpers::handle_anyhow_error;
use mungos::{doc, options::FindOneOptions, Deserialize, Document, Serialize};
use mungos::mongodb::{
bson::{doc, Document},
options::FindOneOptions,
};
use serde::{Deserialize, Serialize};
use types::{
traits::Permissioned, Deployment, DeploymentActionState, DeploymentWithContainerState,
DockerContainerState, DockerContainerStats, Log, Operation, PermissionLevel, Server,
UpdateStatus,
TerminationSignal, UpdateStatus,
};
use typeshare::typeshare;
@@ -55,16 +59,23 @@ pub struct GetContainerLogQuery {
tail: Option<u32>,
}
#[typeshare]
#[derive(Deserialize)]
pub struct StopContainerQuery {
stop_signal: Option<TerminationSignal>,
stop_time: Option<i32>,
}
pub fn router() -> Router {
Router::new()
.route(
"/:id",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let res = state
.get_deployment_with_container_state(&user, &deployment_id.id)
.get_deployment_with_container_state(&user, &id)
.await
.map_err(handle_anyhow_error)?;
response!(Json(res))
@@ -74,8 +85,8 @@ pub fn router() -> Router {
.route(
"/list",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Query(query): Query<Document>| async move {
let deployments = state
.list_deployments_with_container_state(&user, query)
@@ -88,8 +99,8 @@ pub fn router() -> Router {
.route(
"/create",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Json(deployment): Json<CreateDeploymentBody>| async move {
let deployment = state
.create_deployment(&deployment.name, deployment.server_id, &user)
@@ -102,8 +113,8 @@ pub fn router() -> Router {
.route(
"/create_full",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Json(full_deployment): Json<Deployment>| async move {
let deployment = spawn_request_action(async move {
state
@@ -119,9 +130,9 @@ pub fn router() -> Router {
.route(
"/:id/copy",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(DeploymentId { id }): Path<DeploymentId>,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Json(deployment): Json<CopyDeploymentBody>| async move {
let deployment = spawn_request_action(async move {
state
@@ -137,12 +148,13 @@ pub fn router() -> Router {
.route(
"/:id/delete",
delete(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId{ id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let deployment = spawn_request_action(async move {
state
.delete_deployment(&deployment_id.id, &user)
.delete_deployment(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -154,8 +166,8 @@ pub fn router() -> Router {
.route(
"/update",
patch(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Json(deployment): Json<Deployment>| async move {
let deployment = spawn_request_action(async move {
state
@@ -189,12 +201,12 @@ pub fn router() -> Router {
.route(
"/:id/reclone",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let update = spawn_request_action(async move {
state
.reclone_deployment(&deployment_id.id, &user)
.reclone_deployment(&id, &user, true)
.await
.map_err(handle_anyhow_error)
})
@@ -206,12 +218,13 @@ pub fn router() -> Router {
.route(
"/:id/deploy",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let update = spawn_request_action(async move {
state
.deploy_container(&deployment_id.id, &user)
.deploy_container(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -223,12 +236,12 @@ pub fn router() -> Router {
.route(
"/:id/start_container",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let update = spawn_request_action(async move {
state
.start_container(&deployment_id.id, &user)
.start_container(&id, &user)
.await
.map_err(handle_anyhow_error)
})
@@ -240,12 +253,13 @@ pub fn router() -> Router {
.route(
"/:id/stop_container",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let update = spawn_request_action(async move {
state
.stop_container(&deployment_id.id, &user)
.stop_container(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -257,12 +271,13 @@ pub fn router() -> Router {
.route(
"/:id/remove_container",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(StopContainerQuery { stop_signal, stop_time })| async move {
let update = spawn_request_action(async move {
state
.remove_container(&deployment_id.id, &user)
.remove_container(&id, &user, stop_signal, stop_time)
.await
.map_err(handle_anyhow_error)
})
@@ -274,12 +289,12 @@ pub fn router() -> Router {
.route(
"/:id/pull",
post(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>| async move {
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let update = spawn_request_action(async move {
state
.pull_deployment_repo(&deployment_id.id, &user)
.pull_deployment_repo(&id, &user)
.await
.map_err(handle_anyhow_error)
})
@@ -291,8 +306,8 @@ pub fn router() -> Router {
.route(
"/:id/action_state",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }): Path<DeploymentId>| async move {
let action_state = state
.get_deployment_action_states(id, &user)
@@ -305,12 +320,12 @@ pub fn router() -> Router {
.route(
"/:id/log",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Path(deployment_id): Path<DeploymentId>,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id }),
Query(query): Query<GetContainerLogQuery>| async move {
let log = state
.get_deployment_container_log(&deployment_id.id, &user, query.tail)
.get_deployment_container_log(&id, &user, query.tail)
.await
.map_err(handle_anyhow_error)?;
response!(Json(log))
@@ -320,8 +335,8 @@ pub fn router() -> Router {
.route(
"/:id/stats",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let stats = state
.get_deployment_container_stats(&id, &user)
@@ -334,8 +349,8 @@ pub fn router() -> Router {
.route(
"/:id/deployed_version",
get(
|Extension(state): StateExtension,
Extension(user): RequestUserExtension,
|state: StateExtension,
user: RequestUserExtension,
Path(DeploymentId { id })| async move {
let version = state
.get_deployment_deployed_version(&id, &user)
@@ -440,13 +455,7 @@ impl State {
) -> anyhow::Result<DeploymentActionState> {
self.get_deployment_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self
.deployment_action_states
.lock()
.await
.entry(id)
.or_default()
.clone();
let action_state = self.deployment_action_states.get_or_default(id).await;
Ok(action_state)
}

View File

@@ -4,7 +4,7 @@ use axum_oauth2::random_duration;
use helpers::handle_anyhow_error;
use hex::ToHex;
use hmac::{Hmac, Mac};
use mungos::Deserialize;
use serde::Deserialize;
use sha2::Sha256;
use types::GITHUB_WEBHOOK_USER_ID;

View File

@@ -5,7 +5,8 @@ use axum::{
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, Group, PermissionLevel};
use typeshare::typeshare;

View File

@@ -9,7 +9,8 @@ use axum::{
};
use futures_util::Future;
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{PermissionLevel, UpdateTarget, User};
use typeshare::typeshare;
@@ -20,15 +21,16 @@ use crate::{
ResponseResult,
};
pub mod build;
pub mod deployment;
mod build;
mod command;
mod deployment;
mod github_listener;
pub mod group;
pub mod permissions;
pub mod procedure;
pub mod secret;
pub mod server;
pub mod update;
mod group;
mod permissions;
mod procedure;
mod secret;
mod server;
mod update;
#[typeshare]
#[derive(Deserialize)]
@@ -94,6 +96,7 @@ pub fn router() -> Router {
.nest("/build", build::router())
.nest("/deployment", deployment::router())
.nest("/server", server::router())
.nest("/command", command::router())
.nest("/procedure", procedure::router())
.nest("/group", group::router())
.nest("/update", update::router())

View File

@@ -1,10 +1,11 @@
use anyhow::{anyhow, Context};
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, Serialize};
use mungos::mongodb::bson::{doc, Document};
use serde::{Deserialize, Serialize};
use types::{
monitor_timestamp, Build, Deployment, Log, Operation, PermissionLevel, PermissionsTarget,
Procedure, Server, Update, UpdateStatus, UpdateTarget,
monitor_timestamp, Build, Deployment, Group, Log, Operation, PermissionLevel,
PermissionsTarget, Procedure, Server, Update, UpdateStatus, UpdateTarget,
};
use typeshare::typeshare;
@@ -82,10 +83,10 @@ pub fn router() -> Router {
async fn update_permissions(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(permission_update): Json<PermissionsUpdateBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user not authorized for this action (is not admin)"
));
@@ -107,7 +108,7 @@ async fn update_permissions(
operation: Operation::ModifyUserPermissions,
start_ts: monitor_timestamp(),
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
status: UpdateStatus::Complete,
..Default::default()
};
@@ -199,9 +200,9 @@ async fn update_permissions(
.procedures
.find_one_by_id(&permission_update.target_id)
.await
.context("failed at find build query")?
.context("failed at find procedure query")?
.ok_or(anyhow!(
"failed to find a build with id {}",
"failed to find a procedure with id {}",
permission_update.target_id
))?;
state
@@ -220,6 +221,33 @@ async fn update_permissions(
target_user.username, permission_update.permission, procedure.name
)
}
PermissionsTarget::Group => {
let group = state
.db
.groups
.find_one_by_id(&permission_update.target_id)
.await
.context("failed at find group query")?
.ok_or(anyhow!(
"failed to find a group with id {}",
permission_update.target_id
))?;
state
.db
.groups
.update_one::<Group>(
&permission_update.target_id,
mungos::Update::Set(doc! {
format!("permissions.{}", permission_update.user_id): permission_update.permission.to_string()
}),
)
.await?;
update.target = UpdateTarget::Group(group.id);
format!(
"user {} given {} permissions on group {}",
target_user.username, permission_update.permission, group.name
)
}
};
update
.logs
@@ -231,10 +259,10 @@ async fn update_permissions(
async fn modify_user_enabled(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(ModifyUserEnabledBody { user_id, enabled }): Json<ModifyUserEnabledBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user does not have permissions for this action (not admin)"
));
@@ -264,7 +292,7 @@ async fn modify_user_enabled(
end_ts: Some(ts),
status: UpdateStatus::Complete,
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
..Default::default()
};
update.id = state.add_update(update.clone()).await?;
@@ -273,13 +301,13 @@ async fn modify_user_enabled(
async fn modify_user_create_server_permissions(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(ModifyUserCreateServerBody {
user_id,
create_server_permissions,
}): Json<ModifyUserCreateServerBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user does not have permissions for this action (not admin)"
));
@@ -319,7 +347,7 @@ async fn modify_user_create_server_permissions(
end_ts: Some(ts),
status: UpdateStatus::Complete,
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
..Default::default()
};
update.id = state.add_update(update.clone()).await?;
@@ -328,13 +356,13 @@ async fn modify_user_create_server_permissions(
async fn modify_user_create_build_permissions(
Extension(state): StateExtension,
Extension(user): RequestUserExtension,
Extension(req_user): RequestUserExtension,
Json(ModifyUserCreateBuildBody {
user_id,
create_build_permissions,
}): Json<ModifyUserCreateBuildBody>,
) -> anyhow::Result<Update> {
if !user.is_admin {
if !req_user.is_admin {
return Err(anyhow!(
"user does not have permissions for this action (not admin)"
));
@@ -374,7 +402,7 @@ async fn modify_user_create_build_permissions(
end_ts: Some(ts),
status: UpdateStatus::Complete,
success: true,
operator: user.id.clone(),
operator: req_user.id.clone(),
..Default::default()
};
update.id = state.add_update(update.clone()).await?;

View File

@@ -5,7 +5,8 @@ use axum::{
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Document, Serialize};
use mungos::mongodb::bson::Document;
use serde::{Deserialize, Serialize};
use types::{traits::Permissioned, PermissionLevel, Procedure};
use typeshare::typeshare;

View File

@@ -5,7 +5,11 @@ use axum::{
Extension, Json, Router,
};
use helpers::{generate_secret, handle_anyhow_error};
use mungos::{doc, to_bson, Deserialize, Document, Serialize, Update};
use mungos::{
mongodb::bson::{doc, to_bson, Document},
Update,
};
use serde::{Deserialize, Serialize};
use types::{monitor_timestamp, ApiSecret};
use typeshare::typeshare;

View File

@@ -1,5 +1,5 @@
use anyhow::{anyhow, Context};
use async_timing_util::get_timelength_in_ms;
use async_timing_util::{get_timelength_in_ms, unix_timestamp_ms};
use axum::{
extract::{ws::Message as AxumMessage, Path, Query, WebSocketUpgrade},
response::IntoResponse,
@@ -8,7 +8,11 @@ use axum::{
};
use futures_util::{future::join_all, SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document};
use mungos::mongodb::{
bson::{doc, Document},
options::FindOptions,
};
use serde::Deserialize;
use tokio::select;
use tokio_tungstenite::tungstenite::Message;
use tokio_util::sync::CancellationToken;
@@ -19,7 +23,7 @@ use types::{
};
use typeshare::typeshare;
const MAX_HISTORICAL_STATS_LIMIT: i64 = 1000;
const MAX_HISTORICAL_STATS_LIMIT: i64 = 500;
use crate::{
auth::{RequestUser, RequestUserExtension},
@@ -483,14 +487,9 @@ impl State {
user: &RequestUser,
query: &HistoricalStatsQuery,
) -> anyhow::Result<Vec<SystemStatsRecord>> {
let limit = if query.limit as i64 > MAX_HISTORICAL_STATS_LIMIT {
MAX_HISTORICAL_STATS_LIMIT
} else {
query.limit as i64
};
self.get_server_check_permissions(server_id, user, PermissionLevel::Read)
.await?;
let ts_mod = get_timelength_in_ms(query.interval.to_string().parse().unwrap()) as i64;
let mut projection = doc! { "processes": 0, "disk.disks": 0 };
if !query.networks {
projection.insert("networks", 0);
@@ -498,14 +497,30 @@ impl State {
if !query.components {
projection.insert("components", 0);
}
let limit = if query.limit as i64 > MAX_HISTORICAL_STATS_LIMIT {
MAX_HISTORICAL_STATS_LIMIT
} else {
query.limit as i64
};
let interval = get_timelength_in_ms(query.interval.to_string().parse().unwrap()) as i64;
let mut ts_vec = Vec::<i64>::new();
let curr_ts = unix_timestamp_ms() as i64;
let mut curr_ts = curr_ts - curr_ts % interval - interval * limit * query.page as i64;
for _ in 0..limit {
ts_vec.push(curr_ts);
curr_ts -= interval;
}
self.db
.stats
.get_most_recent(
"ts",
limit,
query.page as u64 * limit as u64,
doc! { "server_id": server_id, "ts": { "$mod": [ts_mod, 0] } },
projection,
.get_some(
doc! {
"server_id": server_id,
"ts": { "$in": ts_vec }
},
FindOptions::builder()
.sort(doc! { "ts": 1 })
.projection(projection)
.build(),
)
.await
.context("failed at mongo query to get stats")
@@ -665,13 +680,7 @@ impl State {
) -> anyhow::Result<ServerActionState> {
self.get_server_check_permissions(&id, &user, PermissionLevel::Read)
.await?;
let action_state = self
.server_action_states
.lock()
.await
.entry(id)
.or_default()
.clone();
let action_state = self.server_action_states.get_or_default(id).await;
Ok(action_state)
}
}

View File

@@ -1,7 +1,7 @@
use anyhow::{anyhow, Context};
use axum::{extract::Query, routing::get, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, to_bson};
use mungos::mongodb::bson::{doc, to_bson};
use serde_json::Value;
use types::{PermissionLevel, Update, UpdateTarget};
@@ -92,6 +92,10 @@ impl State {
.get_group_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
UpdateTarget::Command(id) => self
.get_command_check_permissions(id, user, PermissionLevel::Read)
.await
.map(|_| ()),
}
}
}

View File

@@ -4,7 +4,8 @@ use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::github::{GithubOauthClient, GithubOauthExtension};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{monitor_timestamp, CoreConfig, User};
use crate::{response, state::StateExtension};
@@ -68,10 +69,15 @@ async fn callback(
.context("failed to generate jwt")?,
None => {
let ts = monitor_timestamp();
let no_users_exist = state.db.users.find_one(None, None).await?.is_none();
let user = User {
username: github_user.login,
avatar: github_user.avatar_url.into(),
github_id: github_id.into(),
enabled: no_users_exist,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
created_at: ts.clone(),
updated_at: ts,
..Default::default()

View File

@@ -4,7 +4,8 @@ use anyhow::{anyhow, Context};
use axum::{extract::Query, response::Redirect, routing::get, Extension, Router};
use axum_oauth2::google::{GoogleOauthClient, GoogleOauthExtension};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize};
use mungos::mongodb::bson::doc;
use serde::Deserialize;
use types::{monitor_timestamp, CoreConfig, User};
use crate::{response, state::StateExtension};
@@ -85,6 +86,7 @@ async fn callback(
.context("failed to generate jwt")?,
None => {
let ts = monitor_timestamp();
let no_users_exist = state.db.users.find_one(None, None).await?.is_none();
let user = User {
username: google_user
.email
@@ -95,6 +97,10 @@ async fn callback(
.to_string(),
avatar: google_user.picture.into(),
google_id: google_id.into(),
enabled: no_users_exist,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
created_at: ts.clone(),
updated_at: ts,
..Default::default()

View File

@@ -9,7 +9,7 @@ use axum::{body::Body, http::Request, Extension};
use axum_oauth2::random_string;
use hmac::{Hmac, Mac};
use jwt::{SignWithKey, VerifyWithKey};
use mungos::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use types::{CoreConfig, User};
@@ -20,6 +20,7 @@ pub type RequestUserExtension = Extension<Arc<RequestUser>>;
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;
#[derive(Default)]
pub struct RequestUser {
pub id: String,
pub is_admin: bool,

View File

@@ -1,7 +1,7 @@
use anyhow::{anyhow, Context};
use axum::{extract::Json, routing::post, Extension, Router};
use helpers::handle_anyhow_error;
use mungos::doc;
use mungos::mongodb::bson::doc;
use types::{monitor_timestamp, User, UserCredentials};
use crate::state::StateExtension;
@@ -47,6 +47,7 @@ async fn create_user_handler(
enabled: no_users_exist,
admin: no_users_exist,
create_server_permissions: no_users_exist,
create_build_permissions: no_users_exist,
created_at: ts.clone(),
updated_at: ts,
..Default::default()

View File

@@ -9,7 +9,7 @@ use axum::{
Extension, Json, Router,
};
use helpers::handle_anyhow_error;
use mungos::{Deserialize, Serialize};
use serde::{Deserialize, Serialize};
use types::CoreConfig;
use typeshare::typeshare;

View File

@@ -2,7 +2,11 @@ use anyhow::{anyhow, Context};
use async_timing_util::unix_timestamp_ms;
use axum::{routing::post, Extension, Json, Router};
use helpers::handle_anyhow_error;
use mungos::{doc, Deserialize, Document, Update};
use mungos::{
mongodb::bson::{doc, Document},
Update,
};
use serde::Deserialize;
use types::unix_from_monitor_ts;
use crate::state::StateExtension;

View File

@@ -1,14 +1,14 @@
use std::time::Duration;
use anyhow::{anyhow, Context};
use aws_sdk_ec2::model::{
BlockDeviceMapping, EbsBlockDevice, InstanceNetworkInterfaceSpecification, InstanceStateChange,
InstanceStateName, InstanceStatus, ResourceType, Tag, TagSpecification,
};
pub use aws_sdk_ec2::{
model::InstanceType,
output::{DescribeInstanceStatusOutput, TerminateInstancesOutput},
Client, Region,
use aws_sdk_ec2::{
config::Region,
types::{
BlockDeviceMapping, EbsBlockDevice, InstanceNetworkInterfaceSpecification,
InstanceStateChange, InstanceStateName, InstanceStatus, InstanceType, ResourceType, Tag,
TagSpecification,
},
Client,
};
use types::Server;

View File

@@ -1,9 +1,12 @@
use axum_extra::routing::SpaRouter;
use axum::Router;
use dotenv::dotenv;
use helpers::parse_config_file;
use mungos::Deserialize;
use merge_config_files::parse_config_file;
use serde::Deserialize;
use tower_http::services::{ServeDir, ServeFile};
use types::CoreConfig;
type SpaRouter = Router;
#[derive(Deserialize, Debug)]
struct Env {
#[serde(default = "default_config_path")]
@@ -12,12 +15,17 @@ struct Env {
pub frontend_path: String,
}
pub fn load() -> (CoreConfig, SpaRouter) {
pub fn load() -> (CoreConfig, SpaRouter, ServeFile) {
dotenv().ok();
let env: Env = envy::from_env().expect("failed to parse environment variables");
let config = parse_config_file(&env.config_path).expect("failed to parse config");
let spa_router = SpaRouter::new("/assets", env.frontend_path);
(config, spa_router)
let config = parse_config_file(env.config_path).expect("failed to parse config");
let spa_router = Router::new().nest_service(
"/assets",
ServeDir::new(&env.frontend_path)
.not_found_service(ServeFile::new(format!("{}/index.html", env.frontend_path))),
);
let index_html_service = ServeFile::new(format!("{}/index.html", env.frontend_path));
(config, spa_router, index_html_service)
}
pub fn default_config_path() -> String {

View File

@@ -1,9 +1,10 @@
use std::str::FromStr;
use std::{collections::HashMap, str::FromStr};
use anyhow::anyhow;
use diff::{Diff, OptionDiff};
use helpers::to_monitor_name;
use types::Build;
use tokio::sync::RwLock;
use types::{traits::Busy, Build};
#[macro_export]
macro_rules! response {
@@ -66,3 +67,37 @@ pub fn empty_or_only_spaces(word: &str) -> bool {
}
return true;
}
/// A simple async key-value cache over `tokio::sync::RwLock<HashMap<String, T>>`.
///
/// Values must be `Clone` (reads hand out owned copies, never references into
/// the map) and `Default` (missing entries are lazily created on write paths).
#[derive(Default)]
pub struct Cache<T: Clone + Default> {
    cache: RwLock<HashMap<String, T>>,
}

impl<T: Clone + Default> Cache<T> {
    /// Returns a clone of the value for `key`, or `None` if absent.
    /// Takes only a read lock, so concurrent readers do not block each other.
    pub async fn get(&self, key: &str) -> Option<T> {
        // `.cloned()` is the idiomatic form of `.map(|e| e.clone())`.
        self.cache.read().await.get(key).cloned()
    }

    /// Returns a clone of the value for `key`, inserting `T::default()` first
    /// if the entry does not exist. Takes a write lock because it may insert.
    pub async fn get_or_default(&self, key: String) -> T {
        let mut cache = self.cache.write().await;
        cache.entry(key).or_default().clone()
    }

    /// Mutates the entry for `key` in place via `handler`, inserting
    /// `T::default()` first if absent. The write lock is held for the
    /// duration of `handler`, so keep it cheap and non-blocking.
    ///
    /// `FnOnce` is the most general bound that works here (the handler is
    /// called exactly once); any `Fn`/`FnMut` closure still satisfies it.
    pub async fn update_entry(&self, key: String, handler: impl FnOnce(&mut T)) {
        let mut cache = self.cache.write().await;
        handler(cache.entry(key).or_default());
    }

    /// Removes every entry from the cache.
    pub async fn clear(&self) {
        self.cache.write().await.clear();
    }
}

impl<T: Clone + Default + Busy> Cache<T> {
    /// Reports whether the entry for `id` is currently busy.
    /// A missing entry counts as not busy.
    pub async fn busy(&self, id: &str) -> bool {
        self.get(id).await.map_or(false, |state| state.busy())
    }
}

View File

@@ -4,6 +4,7 @@ use ::helpers::get_socket_addr;
use auth::JwtClient;
use axum::{http::StatusCode, Router};
use state::State;
use termination_signal::tokio::immediate_term_handle;
use tower_http::cors::{Any, CorsLayer};
mod actions;
@@ -20,29 +21,43 @@ type ResponseResult<T> = Result<T, (StatusCode, String)>;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let (config, spa_router) = config::load();
println!("version: v{}", env!("CARGO_PKG_VERSION"));
println!("starting monitor core on port {}...", config.port);
let term_signal = immediate_term_handle()?;
let app = Router::new()
.merge(spa_router)
.nest("/api", api::router())
.nest("/auth", auth::router(&config))
.nest("/ws", ws::router())
.layer(JwtClient::extension(&config))
.layer(State::extension(config.clone()).await)
.layer(
CorsLayer::new()
.allow_origin(Any)
.allow_methods(Any)
.allow_headers(Any),
);
let app = tokio::spawn(async move {
let (config, spa_router, index_html_service) = config::load();
println!("started monitor core on port {}", config.port);
println!("starting monitor core on port {}...", config.port);
axum::Server::bind(&get_socket_addr(config.port))
.serve(app.into_make_service())
.await?;
let app = Router::new()
.nest("/api", api::router())
.nest("/auth", auth::router(&config))
.nest("/ws", ws::router())
.layer(JwtClient::extension(&config))
.layer(State::extension(config.clone()).await)
.merge(spa_router)
.fallback_service(index_html_service)
.layer(
CorsLayer::new()
.allow_origin(Any)
.allow_methods(Any)
.allow_headers(Any),
);
println!("started monitor core on port {}", config.port);
axum::Server::bind(&get_socket_addr(config.port))
.serve(app.into_make_service())
.await?;
anyhow::Ok(())
});
tokio::select! {
res = app => return res?,
_ = term_signal => {},
}
Ok(())
}

View File

@@ -4,13 +4,13 @@ use async_timing_util::{
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS, ONE_HOUR_MS,
};
use futures_util::future::join_all;
use mungos::doc;
use mungos::mongodb::bson::doc;
use slack::types::Block;
use types::{Server, SystemStats, SystemStatsQuery, SystemStatsRecord};
use crate::state::State;
#[derive(Default)]
#[derive(Default, Clone)]
pub struct AlertStatus {
cpu_alert: bool,
mem_alert: bool,
@@ -100,16 +100,16 @@ impl State {
}
async fn check_cpu(&self, server: &Server, stats: &SystemStats) {
let server_alert_status = self.server_alert_status.lock().await;
if self.slack.is_none()
|| server_alert_status
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| s.cpu_alert)
.unwrap_or(false)
{
return;
}
drop(server_alert_status);
if stats.cpu_perc > server.cpu_alert {
let region = if let Some(region) = &server.region {
format!(" ({region})")
@@ -171,24 +171,26 @@ impl State {
server.name, stats.cpu_perc
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.cpu_alert = true;
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.cpu_alert = true;
})
.await;
}
}
}
async fn check_mem(&self, server: &Server, stats: &SystemStats) {
let server_alert_status = self.server_alert_status.lock().await;
if self.slack.is_none()
|| server_alert_status
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| s.mem_alert)
.unwrap_or(false)
{
return;
}
drop(server_alert_status);
let usage_perc = (stats.mem_used_gb / stats.mem_total_gb) * 100.0;
if usage_perc > server.mem_alert {
let region = if let Some(region) = &server.region {
@@ -254,25 +256,27 @@ impl State {
server.name, stats.mem_used_gb, stats.mem_total_gb,
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.mem_alert = true;
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.mem_alert = true;
})
.await;
}
}
}
async fn check_disk(&self, server: &Server, stats: &SystemStats) {
for disk in &stats.disk.disks {
let server_alert_status = self.server_alert_status.lock().await;
if self.slack.is_none()
|| server_alert_status
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| *s.disk_alert.get(&disk.mount).unwrap_or(&false))
.unwrap_or(false)
{
return;
}
drop(server_alert_status);
let usage_perc = (disk.used_gb / disk.total_gb) * 100.0;
if usage_perc > server.disk_alert {
let region = if let Some(region) = &server.region {
@@ -315,25 +319,27 @@ impl State {
server.name, stats.disk.used_gb, stats.disk.total_gb,
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.disk_alert.insert(disk.mount.clone(), true);
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.disk_alert.insert(disk.mount.clone(), true);
})
.await;
}
}
}
}
async fn check_components(&self, server: &Server, stats: &SystemStats) {
let lock = self.server_alert_status.lock().await;
if self.slack.is_none()
|| lock
|| self
.server_alert_status
.get(&server.id)
.await
.map(|s| s.component_alert)
.unwrap_or(false)
{
return;
}
drop(lock);
let info = stats
.components
.iter()
@@ -393,9 +399,11 @@ impl State {
info.join(" | "),
)
} else {
let mut lock = self.server_alert_status.lock().await;
let entry = lock.entry(server.id.clone()).or_default();
entry.component_alert = true;
self.server_alert_status
.update_entry(server.id.clone(), |entry| {
entry.component_alert = true;
})
.await;
}
}
}
@@ -487,7 +495,7 @@ impl State {
);
}
{
self.server_alert_status.lock().await.clear();
self.server_alert_status.clear().await;
}
}
}

View File

@@ -1,30 +1,31 @@
use std::{collections::HashMap, sync::Arc};
use std::sync::Arc;
use async_timing_util::{unix_timestamp_ms, wait_until_timelength, Timelength, ONE_HOUR_MS};
use axum::Extension;
use db::DbClient;
use futures_util::future::join_all;
use mungos::doc;
use mungos::mongodb::bson::doc;
use periphery::PeripheryClient;
use tokio::sync::Mutex;
use types::{BuildActionState, CoreConfig, DeploymentActionState, ServerActionState};
use types::{
BuildActionState, CommandActionState, CoreConfig, DeploymentActionState, ServerActionState,
};
use crate::{monitoring::AlertStatus, ws::update::UpdateWsChannel};
use crate::{helpers::Cache, monitoring::AlertStatus, ws::update::UpdateWsChannel};
pub type StateExtension = Extension<Arc<State>>;
pub type ActionStateMap<T> = Mutex<HashMap<String, T>>;
// pub type Cache<T> = RwLock<HashMap<String, T>>;
pub struct State {
pub config: CoreConfig,
pub db: DbClient,
pub update: UpdateWsChannel,
pub periphery: PeripheryClient,
pub slack: Option<slack::Client>,
pub build_action_states: ActionStateMap<BuildActionState>,
pub deployment_action_states: ActionStateMap<DeploymentActionState>,
pub server_action_states: ActionStateMap<ServerActionState>,
pub server_alert_status: Mutex<HashMap<String, AlertStatus>>, // (server_id, AlertStatus)
pub build_action_states: Cache<BuildActionState>,
pub deployment_action_states: Cache<DeploymentActionState>,
pub server_action_states: Cache<ServerActionState>,
pub command_action_states: Cache<CommandActionState>,
pub server_alert_status: Cache<AlertStatus>, // (server_id, AlertStatus)
}
impl State {
@@ -38,6 +39,7 @@ impl State {
build_action_states: Default::default(),
deployment_action_states: Default::default(),
server_action_states: Default::default(),
command_action_states: Default::default(),
server_alert_status: Default::default(),
};
let state = Arc::new(state);

View File

@@ -6,9 +6,9 @@ use axum::{
};
use futures_util::{SinkExt, StreamExt};
use helpers::handle_anyhow_error;
use mungos::Deserialize;
use serde::Deserialize;
use tokio::select;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::tungstenite::Message as TungsteniteMessage;
use tokio_util::sync::CancellationToken;
use types::{traits::Permissioned, PermissionLevel, SystemStatsQuery};
@@ -67,7 +67,7 @@ pub async fn ws_handler(
},
stats = stats_recv.next() => stats,
};
if let Some(Ok(Message::Text(msg))) = stats {
if let Some(Ok(TungsteniteMessage::Text(msg))) = stats {
let _ = ws_sender.send(AxumMessage::Text(msg)).await;
} else {
let _ = stats_recv.close(None).await;

View File

@@ -140,6 +140,12 @@ async fn user_can_see_update(
.await?;
(permissions, "group")
}
UpdateTarget::Command(command_id) => {
let permissions = db_client
.get_user_permission_on_command(user_id, command_id)
.await?;
(permissions, "command")
}
UpdateTarget::System => {
return Err(anyhow!("user not admin, can't recieve system updates"))
}

View File

@@ -1,56 +0,0 @@
# building images
Monitor builds docker images by cloning the source repository from Github, running ```docker build```, and pushing the resulting image to docker hub. Any repo containing a 'Dockerfile' is buildable using this method.
Build configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](https://github.com/mbecker20/monitor/blob/main/docs/paths.md).
## repo configuration
To specify the github repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like ```mbecker20/monitor```, it includes the username / organization that owns the repo.
Many repos are private; in this case a GitHub access token is required in the periphery.config.toml of the building server. These are specified in the config like ```username = "access_token"```. An account which has access to the repo and is available on the periphery server can be selected via the *github account* dropdown menu.
## docker build configuration
In order to docker build, monitor just needs to know the build directory and the path of the Dockerfile, you can configure these in the *build config* section.
If the build directory is the root of the repository, you pass the build path as ```.```. If the build directory is some folder of the repo, just pass the name of the folder. Do not pass the preceding "/", for example ```build/directory```.
The dockerfile's path is given relative to the build directory. So if your build directory is ```build/directory``` and the dockerfile is in ```build/directory/Dockerfile.example```, you give the dockerfile path simply as ```Dockerfile.example```.
Just as with private repos, you will need to select a docker account to use with ```docker push```.
## running a pre build command
Sometimes a command needs to be run before running ```docker build```, you can configure this in the *pre build* section.
There are two fields to pass for *pre build*. The first is *path*, which changes the working directory. To run the command in the root of the repo, just pass ```.```. The second field is *command*, which is the shell command to be executed after the repo is cloned.
For example, say your repo had a folder in it called ```scripts``` with a shell script ```on-clone.sh```. You would give *path* as ```scripts``` and command as ```sh on-clone.sh```. Or you could make *path* just ```.``` and then the command would be ```sh scripts/on-clone.sh```. Either way works fine.
## adding build args
The Dockerfile may make use of [build args](https://docs.docker.com/engine/reference/builder/#arg). Build args can be passed using the GUI by pressing the ```edit``` button. They are passed in the menu just as they would be in a .env file:
```
BUILD_ARG1=some_value
BUILD_ARG2=some_other_value
```
## builder configuration
A builder is a machine running monitor periphery and docker. Any server connected to monitor can be chosen as the builder for a build.
Building on a machine running production software is usually not a great idea, as this process can use a lot of the system resources. It is better to start up a temporary cloud machine dedicated for the build, then shut it down when the build is finished. Right now monitor supports AWS ec2 for this task.
### AWS builder
You can choose to build on AWS on the "builder" tab on the build's page. From here you can configure the AMI to use as a base to build the image. These must be configured in the monitor core configuration along with other information like defaults to use, AWS credentials, etc. This is explained on the [core setup page](https://github.com/mbecker20/monitor/blob/main/docs/setup.md).
## versioning
Monitor uses a major.minor.patch versioning scheme. Every build will auto increment the patch number, and push the image to docker hub with the version tag as well as the "latest" tag.
[next: deploying](https://github.com/mbecker20/monitor/blob/main/docs/deployments.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)

View File

@@ -1,5 +0,0 @@
# setting up monitor core
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)

20
docsite/.gitignore vendored Normal file
View File

@@ -0,0 +1,20 @@
# Dependencies
/node_modules
# Production
/build
# Generated files
.docusaurus
.cache-loader
# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*

41
docsite/README.md Normal file
View File

@@ -0,0 +1,41 @@
# Website
This website is built using [Docusaurus 2](https://docusaurus.io/), a modern static website generator.
### Installation
```
$ yarn
```
### Local Development
```
$ yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
### Build
```
$ yarn build
```
This command generates static content into the `build` directory and can be served using any static contents hosting service.
### Deployment
Using SSH:
```
$ USE_SSH=true yarn deploy
```
Not using SSH:
```
$ GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.

3
docsite/babel.config.js Normal file
View File

@@ -0,0 +1,3 @@
// Babel configuration for the Docusaurus docsite: delegates entirely to the
// preset shipped with @docusaurus/core (resolved from this package's
// node_modules so the docsite and framework stay version-locked).
module.exports = {
presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
};

View File

@@ -0,0 +1,42 @@
import Divider from '@site/src/components/Divider';
# api secrets
these routes are used to manage api secrets.
| name | route |
| ---- | ------ |
| [create api secret](/api/api-secrets#create-api-secret) | `POST /api/secret/create` |
| [delete api secret](/api/api-secrets#delete-api-secret) | `DELETE /api/secret/delete/<secret-name>` |
```mdx-code-block
<Divider />
```
## create api secret
`POST /api/secret/create`
### request body
```json
{
name: string, // name the secret. must be unique among the users secrets
expires?: rfc3339_timestamp, // optional expiry time. if none, the secret will not expire.
}
```
### response body
```json
string // the body will be the secret hash used to log in.
```
```mdx-code-block
<Divider />
```
## delete api secret
`DELETE /api/secret/delete/<secret-name>`
### response
```json
HTTP 200 OK
```

View File

@@ -0,0 +1,8 @@
# authenticating requests
monitor uses the `JSON Web Token (JWT)` standard to authenticate all requests to subroutes under `/api`.
users can acquire a `JWT` using a [login method](/api/login).
to authenticate requests, pass the `JWT` under the `Authorization` header:
`Authorization: Bearer <JWT>`

224
docsite/docs/api/build.mdx Normal file
View File

@@ -0,0 +1,224 @@
import Divider from '@site/src/components/Divider';
# build
these routes relate to interacting with monitor `builds`
| name | route |
| ---- | ------ |
| [list builds](/api/build#list-builds) | `GET /api/build/list` |
| [get build](/api/build#get-build) | `GET /api/build/<build_id>` |
| [get build action state](/api/build#get-build-action-state) | `GET /api/build/<build_id>/action_state` |
| [get build versions](/api/build#get-build-versions) | `GET /api/build/<build_id>/versions` |
| [create build](/api/build#create-build) | `POST /api/build/create` |
| [create full build](/api/build#create-full-build) | `POST /api/build/create_full` |
| [copy build](/api/build#copy-build) | `POST /api/build/<build_id>/copy` |
| [delete build](/api/build#delete-build) | `DELETE /api/build/<build_id>/delete` |
| [update build](/api/build#update-build) | `PATCH /api/build/update` |
| [build](/api/build#build-action) | `POST /api/build/<build_id>/build` |
| [get aws builder defaults](/api/build#get-aws-builder-defaults) | `GET /api/build/aws_builder_defaults` |
| [get allowed docker organizations](/api/build#get-allowed-docker-organizations) | `GET /api/build/docker_organizations` |
```mdx-code-block
<Divider />
```
## list builds
`GET /api/build/list`
this method will return an array of builds the requesting user has a minimum of `Read` permissions on.
### response body
Array<[Build](/api/types#build)>
```mdx-code-block
<Divider />
```
## get build
`GET /api/build/<build_id>`
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## get build action state
`GET /api/build/<build_id>/action_state`
this method returns the action state for the build, eg. whether the build is currently `building`.
### response body
```json
{
building: boolean,
updating: boolean,
}
```
```mdx-code-block
<Divider />
```
## get build versions
`GET /api/build/<build_id>/versions`
paginated route for fetching the most recent available versions of this build.
### query params
```json
page=number // optional, default is 0. pagination starting at page 0.
major=number // optional. filter by major version number
minor=number // optional. filter by minor version number
patch=number // optional. filter by patch version number
```
### response body
```json
[
{
ts: rfc3339_timestamp,
version: {
major: number,
minor: number,
patch: number,
}
},
...
]
```
```mdx-code-block
<Divider />
```
## create build
`POST /api/build/create`
### request body
```json
{
name: string,
}
```
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## create full build
`POST /api/build/create_full`
### request body
[Build](/api/types#build)
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## copy build
`POST /api/build/<build_id>/copy`
this method will create a copy of the build with a new _id and name,
with all the same configuration as the target build.
### request body
```json
{
name: string, // the new name
}
```
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## delete build
`DELETE /api/build/<build_id>/delete`
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## update build
`PATCH /api/build/update`
### request body
[Build](/api/types#build)
### response body
[Build](/api/types#build)
```mdx-code-block
<Divider />
```
## build (action)
`POST /api/build/<build_id>/build`
### response body
[Update](/api/types#update)
:::note
this update will include the `version` field.
:::
```mdx-code-block
<Divider />
```
## get aws builder defaults
`GET /api/build/aws_builder_defaults`
### response body
```json
{
default_ami_name: string,
default_subnet_id: string,
default_key_pair_name: string,
default_region: string,
default_volume_gb: number,
default_instance_type: string,
default_security_group_ids: string[],
default_assign_public_ip: boolean,
available_ami_accounts: [
{
ami_id: string,
github: string[],
docker: string[],
secrets: string[],
}
],
}
```
```mdx-code-block
<Divider />
```
## get allowed docker organizations
`GET /api/build/docker_organizations`
### response body
```json
string[] // the names of the allowed docker organizations
```

View File

@@ -0,0 +1,344 @@
import Divider from '@site/src/components/Divider';
# deployment
these routes relate to interacting with monitor `deployments`
| name | route |
| ---- | ------ |
| [list deployments](/api/deployment#list-deployments) | `GET /api/deployment/list` |
| [get deployment](/api/deployment#get-deployment) | `GET /api/deployment/<deployment_id>` |
| [get deployment action state](/api/deployment#get-deployment-action-state) | `GET /api/deployment/<deployment_id>/action_state` |
| [get deployment container log](/api/deployment#get-deployment-container-log) | `GET /api/deployment/<deployment_id>/log` |
| [get deployment container stats](/api/deployment#get-deployment-container-stats) | `GET /api/deployment/<deployment_id>/stats` |
| [get deployment deployed version](/api/deployment#get-deployment-deployed-version) | `GET /api/deployment/<deployment_id>/deployed_version` |
| [create deployment](/api/deployment#create-deployment) | `POST /api/deployment/create` |
| [create full deployment](/api/deployment#create-full-deployment) | `POST /api/deployment/create_full` |
| [copy deployment](/api/deployment#copy-deployment) | `POST /api/deployment/<deployment_id>/copy` |
| [delete deployment](/api/deployment#delete-deployment) | `DELETE /api/deployment/<deployment_id>/delete` |
| [update deployment](/api/deployment#update-deployment) | `PATCH /api/deployment/update` |
| [rename deployment](/api/deployment#rename-deployment) | `PATCH /api/deployment/<deployment_id>/rename` |
| [reclone deployment](/api/deployment#reclone-deployment) | `POST /api/deployment/<deployment_id>/reclone` |
| [pull deployment](/api/deployment#pull-deployment) | `POST /api/deployment/<deployment_id>/pull` |
| [deploy container](/api/deployment#deploy-container) | `POST /api/deployment/<deployment_id>/deploy` |
| [start container](/api/deployment#start-container) | `POST /api/deployment/<deployment_id>/start_container` |
| [stop container](/api/deployment#stop-container) | `POST /api/deployment/<deployment_id>/stop_container` |
| [remove container](/api/deployment#remove-container) | `POST /api/deployment/<deployment_id>/remove_container` |
```mdx-code-block
<Divider />
```
## list deployments
`GET /api/deployment/list`
this method will return an array of deployments with container state that the requesting user has a minimum of `Read` permissions on.
### response body
```json
[
{
deployment: Deployment,
state: DockerContainerState,
container?: {
name: string,
id: string,
image: string,
state: DockerContainerState,
status?: string,
}
},
...
]
```
```mdx-code-block
<Divider />
```
## get deployment
`GET /api/deployment/<deployment_id>`
this method will return the deployment with container state that
the requesting user has a minimum of `Read` permissions on.
it will return `500: Internal Server Error` if the user does not have the required permissions.
### response body
```json
{
deployment: Deployment,
state: DockerContainerState,
container?: {
name: string,
id: string,
image: string,
state: DockerContainerState,
status?: string,
}
}
```
```mdx-code-block
<Divider />
```
## get deployment action state
`GET /api/deployment/<deployment_id>/action_state`
this method returns the action state for the deployment, eg. whether the deployment is currently `deploying`.
### response body
```json
{
deploying: boolean,
stopping: boolean,
starting: boolean,
removing: boolean,
pulling: boolean,
recloning: boolean,
updating: boolean,
renaming: boolean,
}
```
```mdx-code-block
<Divider />
```
## get deployment container log
`GET /api/deployment/<deployment_id>/log`
this method is used to get the container's log associated with the deployment.
### query params
```json
{
tail: number // number of log lines to fetch. this is passed to the --tail flag of docker logs command
}
```
### response body
```json
{
stdout: string,
stderr: string,
}
```
```mdx-code-block
<Divider />
```
## get deployment container stats
`GET /api/deployment/<deployment_id>/stats`
this method returns the results of running `docker stats <container_name>`
for the container associated with the deployment.
### response body
```json
{
name: string,
cpu_perc: string,
mem_perc: string,
mem_usage: string,
net_io: string,
block_io: string,
pids: string,
}
```
```mdx-code-block
<Divider />
```
## get deployment deployed version
`GET /api/deployment/<deployment_id>/deployed_version`
this method is used to get the image version of the container associated with the deployment, if it exists.
otherwise, it will return the version specified in the deployment config.
### response body
```json
string // the deployed version like '0.2.4'
```
```mdx-code-block
<Divider />
```
## create deployment
`POST /api/deployment/create`
this method is used to create a new deployment on a particular server.
it will return the created deployment.
:::note
users must be **admin** or have `update` permissions on the server specified by the `server_id`
in the request body in order for this request to succeed.
:::
### request body
```json
{
name: string,
server_id: string,
}
```
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## create full deployment
`POST /api/deployment/create_full`
this method is used to create a new deployment on a particular server, already initialized with config.
it will return the created deployment.
### request body
[Deployment](/api/types#deployment)
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## copy deployment
`POST /api/deployment/<deployment_id>/copy`
this method will create a copy of the deployment with a new _id and name,
with all the same configuration as the target deployment.
it can be used to move the deployment to another server.
### request body
```json
{
name: string,
server_id: string,
}
```
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## delete deployment
`DELETE /api/deployment/<deployment_id>/delete`
this method will delete the deployment. if a container is associated with the deployment, it will be destroyed.
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## update deployment
`PATCH /api/deployment/update`
### request body
[Deployment](/api/types#deployment)
### response body
[Deployment](/api/types#deployment)
```mdx-code-block
<Divider />
```
## rename deployment
`PATCH /api/deployment/<deployment_id>/rename`
### request body
```json
{
new_name: string,
}
```
```mdx-code-block
<Divider />
```
## reclone deployment
`POST /api/deployment/<deployment_id>/reclone`
if the deployment has a repo attached, this will reclone the repo,
including the on-clone and on-pull actions.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## pull deployment
`POST /api/deployment/<deployment_id>/pull`
if the deployment has a repo attached, this will `git pull` in the repo,
including the on-pull action.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## deploy container
`POST /api/deployment/<deployment_id>/deploy`
this will deploy the container corresponding to the deployment's configuration.
if the container already exists, it will destroy it first.
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## start container
`POST /api/deployment/<deployment_id>/start_container`
this will run `docker start <container_name>` for the container
corresponding to the deployment
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## stop container
`POST /api/deployment/<deployment_id>/stop_container`
this will run `docker stop <container_name>` for the container
corresponding to the deployment
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## remove container
`POST /api/deployment/<deployment_id>/remove_container`
this will run `docker stop <container_name> && docker container rm <container_name>`
for the container corresponding to the deployment
### response body
[Update](/api/types#update)

View File

@@ -0,0 +1,11 @@
---
slug: /api
---
this section documents the rest and websocket api
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```

103
docsite/docs/api/login.mdx Normal file
View File

@@ -0,0 +1,103 @@
import Divider from '@site/src/components/Divider';
# login
monitor supports local login (username and password), Oauth2 login (github and google),
and secret login (username and API secret key).
each method must be explicitly enabled in your monitor core config,
otherwise the api won't be available.
:::note
in order to login to an Oauth2 user's account programmatically,
you must [create an api secret](/api/api-secrets#create-api-secret) and login using [/auth/secret/login](/api/login#login-using-api-secret)
:::
| name | route |
| ---- | ------ |
| [get login options](/api/login#get-login-options) | `GET /auth/options` |
| [create local user account](/api/login#create-local-user-account) | `POST /auth/local/create_user` |
| [login local user account](/api/login#login-local-user-account) | `POST /auth/local/login` |
| [login using api secret](/api/login#login-using-api-secret) | `POST /auth/secret/login` |
```mdx-code-block
<Divider />
```
## get login options
`GET /auth/options`
this method is used to obtain the login options for monitor core
### response body
```json
{
local: boolean,
github: boolean,
google: boolean,
}
```
```mdx-code-block
<Divider />
```
## create local user account
`POST /auth/local/create_user`
this method will create a new local auth account with the provided **username** and **password**,
and return a `JWT` for the user to authenticate with.
### request body
```json
{
username: string,
password: string,
}
```
### response body
`<JWT token as string>`
:::caution
a user created with this method is, by default, `disabled`. a monitor admin must enable their account before they can access the API.
:::
```mdx-code-block
<Divider />
```
## login local user account
`POST /auth/local/login`
this method will authenticate a local user's credentials and return a JWT if login is successful.
### request body
```json
{
username: string,
password: string,
}
```
### response body
`<JWT token as string>`
```mdx-code-block
<Divider />
```
## login using api secret
`POST /auth/secret/login`
this method will authenticate a user's account of any kind using an api secret generated using [/api/secret/create](/api/api-secrets#create-api-secret)
### request body
```json
{
username: string,
secret: string,
}
```
### response body
`<JWT token as string>`

View File

@@ -0,0 +1,90 @@
import Divider from '@site/src/components/Divider';
# permissions
these routes relate to updating user permissions
:::note
these routes can only be called by **admin** users
:::
| name | route |
| ---- | ------ |
| [update user permissions on target](/api/permissions#update-user-permissions-on-target) | `POST /api/permissions/update` |
| [modify user enabled](/api/permissions#modify-user-enabled) | `POST /api/permissions/modify_enabled` |
| [modify user create server permissions](/api/permissions#modify-user-create-server-permissions) | `POST /api/permissions/modify_create_server` |
| [modify user create build permissions](/api/permissions#modify-user-create-build-permissions) | `POST /api/permissions/modify_create_build` |
```mdx-code-block
<Divider />
```
## update user permissions on target
`POST /api/permissions/update`
### request body
```json
{
user_id: string, // the target users id
permission: "none" | "read" | "execute" | "update",
target_type: "server" | "deployment" | "build" | "procedure" | "group",
target_id: string, // the target resources id
}
```
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## modify user enabled
`POST /api/permissions/modify_enabled`
### request body
```json
{
user_id: string, // the target users id
enabled: boolean,
}
```
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## modify user create server permissions
`POST /api/permissions/modify_create_server`
### request body
```json
{
user_id: string, // the target users id
create_server_permissions: boolean,
}
```
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## modify user create build permissions
`POST /api/permissions/modify_create_build`
### request body
```json
{
user_id: string, // the target users id
create_build_permissions: boolean,
}
```
### response body
[Update](/api/types#update)

View File

@@ -0,0 +1,10 @@
import Divider from '@site/src/components/Divider';
# procedure
these routes relate to interacting with monitor `procedures`
```mdx-code-block
<Divider />
```

473
docsite/docs/api/server.mdx Normal file
View File

@@ -0,0 +1,473 @@
import Divider from '@site/src/components/Divider';
# server
these routes relate to interacting with monitor `servers`
| name | route |
| ---- | ------ |
| [list servers](/api/server#list-servers) | `GET /api/server/list` |
| [get server](/api/server#get-server) | `GET /api/server/<server_id>` |
| [get server action state](/api/server#get-server-action-state) | `GET /api/server/<server_id>/action_state` |
| [get server github accounts](/api/server#get-server-github-accounts) | `GET /api/server/<server_id>/github_accounts` |
| [get server docker accounts](/api/server#get-server-docker-accounts) | `GET /api/server/<server_id>/docker_accounts` |
| [get server available secrets](/api/server#get-server-available-secrets) | `GET /api/server/<server_id>/secrets` |
| [create server](/api/server#create-server) | `POST /api/server/create` |
| [create full server](/api/server#create-full-server) | `POST /api/server/create_full` |
| [delete server](/api/server#delete-server) | `DELETE /api/server/<server_id>/delete` |
| [update server](/api/server#update-server) | `PATCH /api/server/update` |
| [get server periphery version](/api/server#get-server-periphery-version) | `GET /api/server/<server_id>/version` |
| [get server system information](/api/server#get-server-system-information) | `GET /api/server/<server_id>/system_information` |
| [get server stats](/api/server#get-server-stats) | `GET /api/server/<server_id>/stats` |
| [get server stats history](/api/server#get-server-stats-history) | `GET /api/server/<server_id>/stats/history` |
| [get server stats at time](/api/server#get-server-stats-at-time) | `GET /api/server/<server_id>/stats/at_ts` |
| [get docker networks](/api/server#get-docker-networks) | `GET /api/server/<server_id>/networks` |
| [prune docker networks](/api/server#prune-docker-networks) | `POST /api/server/<server_id>/networks/prune` |
| [get docker images](/api/server#get-docker-images) | `GET /api/server/<server_id>/images` |
| [prune docker images](/api/server#prune-docker-images) | `POST /api/server/<server_id>/images/prune` |
| [get docker containers](/api/server#get-docker-containers) | `GET /api/server/<server_id>/containers` |
| [prune docker containers](/api/server#prune-docker-containers) | `POST /api/server/<server_id>/containers/prune` |
```mdx-code-block
<Divider />
```
## list servers
`GET /api/server/list`
this method will return an array of servers with their status
that the requesting user has a minimum of `Read` permissions on.
### response body
```json
[
{
server: Server,
status: ServerStatus
},
...
]
```
```mdx-code-block
<Divider />
```
## get server
`GET /api/server/<server_id>`
this method will return the server with server status that
the requesting user has a minimum of `Read` permissions on.
it will return `500: Internal Server Error` if the user does not have the required permissions.
### response body
```json
{
server: Server,
status: ServerStatus
}
```
```mdx-code-block
<Divider />
```
## get server action state
`GET /api/server/<server_id>/action_state`
this method returns the action state for the server, eg. whether the server is currently `pruning_images`.
### response body
```json
{
pruning_networks: boolean,
pruning_containers: boolean,
pruning_images: boolean,
}
```
```mdx-code-block
<Divider />
```
## get server github accounts
`GET /api/server/<server_id>/github_accounts`
this method returns a list of all the github account usernames that are available on the server,
as defined in the server's periphery config under [github_accounts].
### response body
```json
["<github_username_1>", "<github_username_2>", ...]
```
```mdx-code-block
<Divider />
```
## get server docker accounts
`GET /api/server/<server_id>/docker_accounts`
this method returns a list of all the docker account usernames that are available on the server,
as defined in the server's periphery config under [docker_accounts].
### response body
```json
["<docker_username_1>", "<docker_username_2>", ...]
```
```mdx-code-block
<Divider />
```
## get server available secrets
`GET /api/server/<server_id>/secrets`
this method returns a list of all the secret keys that are available on the server,
as defined in the server's periphery config under [secrets].
### response body
```json
["<secret_key_1>", "<secret_key_2>", ...]
```
```mdx-code-block
<Divider />
```
## create server
`POST /api/server/create`
### request body
```json
{
name: string,
address: string, // eg. http://12.34.56.78:8000
}
```
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## create full server
`POST /api/server/create_full`
this method is used to create a new server, already initialized with config.
it will return the created server.
### request body
[Server](/api/types#server)
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## delete server
`DELETE /api/server/<server_id>/delete`
this method will delete the server, along with all deployments attached to the server.
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## update server
`PATCH /api/server/update`
this method is used to update a server's configuration.
### request body
[Server](/api/types#server)
### response body
[Server](/api/types#server)
```mdx-code-block
<Divider />
```
## get server periphery version
`GET /api/server/<server_id>/version`
this method is used to get the version of the periphery binary running on the server.
### response body
```json
string // the periphery version
```
```mdx-code-block
<Divider />
```
## get server system information
`GET /api/server/<server_id>/system_information`
this method gets some information about the host system running the periphery binary.
### response body
```json
{
name?: string, // the name of the system
os?: string, // the os the system is running
kernel?: string, // the version of the kernel
core_count?: number, // number of cores in the cpu
host_name?: string, // host name of the system
cpu_brand: string, // information on the cpu of the system
}
```
```mdx-code-block
<Divider />
```
## get server stats
`GET /api/server/<server_id>/stats`
this method retrieves current system stats of the server.
### query params
```json
cpus=boolean // optional. if true, response will include information about each core individually
disks=boolean // optional. if true, response will include breakdown of disk usage by mount point
networks=boolean // optional. if true, response will include info on network usage
components=boolean // optional. if true, response will include component temperature
processes=boolean // optional. if true, response will include all system processes running on host and their resource usage
```
### response body
```json
{
system_load: number,
cpu_perc: number,
cpu_freq_mhz: number,
mem_used_gb: number,
mem_total_gb: number,
disk: {},
cpus: [],
networks: [],
components: [],
processes: [],
polling_rate: Timelength,
refresh_ts: number,
refresh_list_ts: number,
}
```
```mdx-code-block
<Divider />
```
## get server stats history
`GET /api/server/<server_id>/stats/history`
this method will return historical system stats for the server.
the response is paginated, to get older data, specify a higher page number.
### query params
```json
interval=Timelength // optional, default interval is 1-hr. controls granularity of historical data
limit=number // optional, default is 100, max is 500. specifies the number of data points to fetch
page=number // optional, default is 0. specifies the page of data, going backward in time.
networks=boolean // optional. if true, response will include historical info on network usage
components=boolean // optional. if true, response will include historical component temperatures
```
### response body
```json
[
{
ts: number, // unix timestamp in ms
server_id: string // specifies the server
system_load: number,
cpu_perc: number,
cpu_freq_mhz: number,
mem_used_gb: number,
mem_total_gb: number,
disk: {},
cpus: [],
networks: [],
components: [],
processes: [],
polling_rate: Timelength,
},
...
]
```
```mdx-code-block
<Divider />
```
## get server stats at time
`GET /api/server/<server_id>/stats/at_ts`
this method retrieves the historical stats for a server at a specific timestamp
### query params
```json
ts=number // required. the timestamp in ms
```
### response body
```json
{
ts: number, // unix timestamp in ms
server_id: string // specifies the server
system_load: number,
cpu_perc: number,
cpu_freq_mhz: number,
mem_used_gb: number,
mem_total_gb: number,
disk: {},
cpus: [],
networks: [],
components: [],
processes: [],
polling_rate: Timelength,
}
```
```mdx-code-block
<Divider />
```
## get docker networks
`GET /api/server/<server_id>/networks`
this method retrieves the docker networks on the server
### response body
```json
[
{
Name?: string,
Id?: string,
Created?: string,
Scope?: string,
Driver?: string,
EnableIPv6?: boolean,
IPAM?: {
Driver?: string,
Config?: [
{
Subnet?: string,
IPRange?: string,
Gateway?: string,
AuxiliaryAddresses?: {}
},
...
],
Options?: {}
},
Internal?: boolean,
Attachable?: boolean,
Ingress?: boolean,
Containers?: {},
Options?: {},
Labels?: {}
},
...
]
```
```mdx-code-block
<Divider />
```
## prune docker networks
`POST /api/server/<server_id>/networks/prune`
this method triggers the `network prune` action on the server, which runs
`docker network prune -f` on the target server
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## get docker images
`GET /api/server/<server_id>/images`
this method will return a list of images available locally on the server
### response body
```json
[
{
Id: string,
ParentId: string,
RepoTags: [string],
RepoDigests: [string],
Created: number,
Size: number,
SharedSize: number,
VirtualSize: number,
Labels: {},
Containers: number,
}
]
```
```mdx-code-block
<Divider />
```
## prune docker images
`POST /api/server/<server_id>/images/prune`
this method triggers the `image prune` action, which runs
`docker image prune -a -f` on the target server
### response body
[Update](/api/types#update)
```mdx-code-block
<Divider />
```
## get docker containers
`GET /api/server/<server_id>/containers`
this method is used to retrieve information about all the containers on the target server
### response body
```json
[
{
name: string,
id: string,
image: string,
state: DockerContainerState,
status?: string,
},
...
]
```
```mdx-code-block
<Divider />
```
## prune docker containers
`POST /api/server/<server_id>/containers/prune`
this method triggers the `container prune` action, which runs
`docker container prune -f` on the target server
### response body
[Update](/api/types#update)

283
docsite/docs/api/types.mdx Normal file
View File

@@ -0,0 +1,283 @@
import Divider from "@site/src/components/Divider";
# types
these types are used across the monitor api, defined using `typescript`. they are referenced throughout the api docs.
```mdx-code-block
<Divider />
```
## build
```typescript
interface Build {
_id?: {
$oid: string;
};
name: string;
description?: string;
permissions?: {
[user_id: string]: PermissionLevel;
};
skip_secret_interp?: boolean;
server_id?: string;
aws_config?: {
region?: string;
instance_type?: string;
ami_name?: string;
volume_gb?: number;
subnet_id?: string;
security_group_ids?: string[];
key_pair_name?: string;
assign_public_ip?: boolean;
};
version: {
major: number;
minor: number;
patch: number;
};
repo?: string;
branch?: string;
github_account?: string;
pre_build?: {
path?: string;
command?: string;
};
docker_build_args?: {
build_path: string;
dockerfile_path?: string;
build_args?: Array<{
variable: string;
value: string;
}>;
extra_args?: string[];
use_buildx?: boolean;
};
docker_account?: string;
docker_organization?: string;
last_built_at?: string;
created_at?: string;
updated_at?: string;
}
```
```mdx-code-block
<Divider />
```
## deployment
```typescript
interface Deployment {
_id?: {
$oid: string;
};
name: string;
description?: string;
server_id: string;
permissions?: PermissionLevel;
skip_secret_interp?: boolean;
docker_run_args: {
image: string;
ports?: Array<{
local: string;
container: string;
}>;
volumes?: Array<{
local: string;
container: string;
}>;
environment?: Array<{
variable: string;
value: string;
}>;
network?: string;
restart?: "no" | "on-failure" | "always" | "unless-stopped";
post_image?: string;
container_user?: string;
extra_args?: string[];
docker_account?: string;
};
build_id?: string;
build_version?: {
major: number;
minor: number;
patch: number;
};
repo?: string;
branch?: string;
github_account?: string;
on_clone?: {
path?: string;
command?: string;
};
on_pull?: {
path?: string;
command?: string;
};
repo_mount?: {
local: string;
container: string;
};
created_at?: string;
updated_at?: string;
}
```
```mdx-code-block
<Divider />
```
## server
```typescript
interface Server {
_id?: string;
name: string;
description?: string;
address: string;
permissions?: {
[user_id: string]: PermissionLevel;
};
enabled: boolean;
to_notify?: string[];
auto_prune?: boolean;
cpu_alert?: number;
mem_alert?: number;
disk_alert?: number;
stats_interval?: Timelength;
region?: string;
instance_id?: string;
created_at?: string;
updated_at?: string;
}
```
```mdx-code-block
<Divider />
```
## update
```typescript
interface Update {
_id?: string;
target: {
type: "System" | "Build" | "Deployment" | "Server" | "Procedure" | "Group";
id?: string;
};
operation: Operation;
logs: Array<{
stage: string;
command: string;
stdout: string;
stderr: string;
success: boolean;
start_ts: string;
end_ts: string;
}>;
start_ts: string;
end_ts?: string;
status: "queued" | "in_progress" | "complete";
success: boolean;
operator: string;
version?: {
major: number;
minor: number;
patch: number;
};
}
```
```mdx-code-block
<Divider />
```
## operation
```typescript
enum Operation {
None = "none",
CreateServer = "create_server",
UpdateServer = "update_server",
DeleteServer = "delete_server",
PruneImagesServer = "prune_images_server",
PruneContainersServer = "prune_containers_server",
PruneNetworksServer = "prune_networks_server",
RenameServer = "rename_server",
CreateBuild = "create_build",
UpdateBuild = "update_build",
DeleteBuild = "delete_build",
BuildBuild = "build_build",
CreateDeployment = "create_deployment",
UpdateDeployment = "update_deployment",
DeleteDeployment = "delete_deployment",
DeployContainer = "deploy_container",
StopContainer = "stop_container",
StartContainer = "start_container",
RemoveContainer = "remove_container",
PullDeployment = "pull_deployment",
RecloneDeployment = "reclone_deployment",
RenameDeployment = "rename_deployment",
CreateProcedure = "create_procedure",
UpdateProcedure = "update_procedure",
DeleteProcedure = "delete_procedure",
CreateGroup = "create_group",
UpdateGroup = "update_group",
DeleteGroup = "delete_group",
ModifyUserEnabled = "modify_user_enabled",
ModifyUserCreateServerPermissions = "modify_user_create_server_permissions",
ModifyUserCreateBuildPermissions = "modify_user_create_build_permissions",
ModifyUserPermissions = "modify_user_permissions",
AutoBuild = "auto_build",
AutoPull = "auto_pull",
}
```
```mdx-code-block
<Divider />
```
## permission level
```typescript
enum PermissionLevel {
None = "none",
Read = "read",
Execute = "execute",
Update = "update",
}
```
```mdx-code-block
<Divider />
```
## timelength
```typescript
enum Timelength {
OneSecond = "1-sec",
FiveSeconds = "5-sec",
TenSeconds = "10-sec",
FifteenSeconds = "15-sec",
ThirtySeconds = "30-sec",
OneMinute = "1-min",
TwoMinutes = "2-min",
FiveMinutes = "5-min",
TenMinutes = "10-min",
FifteenMinutes = "15-min",
ThirtyMinutes = "30-min",
OneHour = "1-hr",
TwoHours = "2-hr",
SixHours = "6-hr",
EightHours = "8-hr",
TwelveHours = "12-hr",
OneDay = "1-day",
ThreeDay = "3-day",
OneWeek = "1-wk",
TwoWeeks = "2-wk",
ThirtyDays = "30-day",
}
```

View File

View File

@@ -0,0 +1,9 @@
# select a builder
A builder is a machine running monitor periphery and docker. Any server connected to monitor can be chosen as the builder for a build.
Building on a machine running production software is usually not a great idea, as this process can use a lot of system resources. It is better to start up a temporary cloud machine dedicated for the build, then shut it down when the build is finished. Right now monitor supports AWS ec2 for this task.
### AWS builder
You can choose to build on AWS on the "builder" tab on the build's page. From here you can select preconfigured AMIs to use as a base to build the image. These must be configured in the monitor core configuration along with other information like defaults to use, AWS credentials, etc. This is explained on the [core setup page](https://github.com/mbecker20/monitor/blob/main/docs/setup.md).

View File

@@ -0,0 +1,29 @@
# configuration
monitor just needs a bit of information in order to build your image.
### repo configuration
To specify the github repo to build, just give it the name of the repo and the branch under *repo config*. The name is given like ```mbecker20/monitor```, it includes the username / organization that owns the repo.
Many repos are private; in this case a Github access token is required in the periphery.config.toml of the building server. These are specified in the config like ```username = "access_token"```. An account which has access to the repo and is available on the periphery server can be selected for use via the *github account* dropdown menu.
### docker build configuration
In order to docker build, monitor just needs to know the build directory and the path of the Dockerfile relative to the repo, you can configure these in the *build config* section.
If the build directory is the root of the repository, you pass the build path as ```.```. If the build directory is some folder of the repo, just pass the name of the folder. Do not pass the preceding "/", for example ```build/directory```
The dockerfile's path is given relative to the build directory. So if your build directory is ```build/directory``` and the dockerfile is in ```build/directory/Dockerfile.example```, you give the dockerfile path simply as ```Dockerfile.example```.
Just as with private repos, you will need to select a docker account to use with ```docker push```.
### adding build args
The Dockerfile may make use of [build args](https://docs.docker.com/engine/reference/builder/#arg). Build args can be passed using the gui by pressing the ```edit``` button. They are passed in the menu just like they would be in a .env file:
```
BUILD_ARG1=some_value
BUILD_ARG2=some_other_value
```
Note that these values are visible in the final image using ```docker history```, so shouldn't be used to pass build time secrets. Use [secret mounts](https://docs.docker.com/engine/reference/builder/#run---mounttypesecret) for this instead.

View File

@@ -0,0 +1,15 @@
---
slug: /build-images
---
# building images
Monitor builds docker images by cloning the source repository from Github, running ```docker build```, and pushing the resulting image to docker hub. Any repo containing a ```Dockerfile``` is buildable using this method.
Build configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](/file-paths).
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```

View File

@@ -0,0 +1,7 @@
# pre build command
Sometimes a command needs to be run before running ```docker build```, you can configure this in the *pre build* section.
There are two fields to pass for *pre build*. The first is *path*, which changes the working directory. To run the command in the root of the repo, just pass ```.```. The second field is *command*; this is the shell command to be executed after the repo is cloned.
For example, say your repo had a folder in it called ```scripts``` with a shell script ```on-clone.sh```. You would give *path* as ```scripts``` and command as ```sh on-clone.sh```. Or you could make *path* just ```.``` and then the command would be ```sh scripts/on-clone.sh```. Either way works fine.

View File

@@ -0,0 +1,3 @@
# versioning
Monitor uses a major.minor.patch versioning scheme. Every build will auto increment the patch number, and push the image to docker hub with the version tag as well as the ```latest``` tag.

View File

@@ -0,0 +1,7 @@
# adding the server to monitor
The easiest way to add the server is with the GUI. On the home page, click the ```+``` button to the right of the server search bar, configure the name and address of the server. The address is the full http/s url to the periphery server, eg ```http://12.34.56.78:8000```.
Once it is added, you can use the GUI to modify some config, like the alerting thresholds for cpu, memory and disk usage. A server can also be temporarily disabled; this will prevent alerting if it goes offline.
Since no state is stored on the periphery servers, you can easily redirect all deployments to be hosted on a different server. Just update the address to point to the new server.

View File

@@ -0,0 +1,16 @@
---
slug: /connecting-servers
---
# connecting servers
Integrating a device into the monitor system has 2 steps:
1. Setup and start the periphery agent on the server
2. Adding the server to monitor via the core API
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```

View File

@@ -1,11 +1,4 @@
# connecting servers
Integrating a device into the monitor system has 2 steps:
1. Setup and start the periphery agent on the server
2. Adding the server to monitor via the core API
## setup monitor periphery
# setup monitor periphery
The easiest way to do this is to follow the [monitor guide](https://github.com/mbecker20/monitor-guide). This is a repo containing directions and scripts enabling command line installation via ssh or remotely.
@@ -13,7 +6,7 @@ The easiest way to do this is to follow the [monitor guide](https://github.com/m
1. Download the periphery binary from the latest [release](https://github.com/mbecker20/monitor/releases).
2. Create and edit your config files, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). The monitor cli can be used to add the boilerplate: ```monitor periphery gen-config --path /path/to/config.toml```. The files can be anywhere, and can be passed to periphery via the ```--config-path``` flag.
2. Create and edit your config files, following the [config example](https://github.com/mbecker20/monitor/blob/main/config_example/periphery.config.example.toml). The monitor cli can be used to add the boilerplate: ```monitor periphery gen-config --path /path/to/config.toml```. The files can be anywhere, and can be passed to periphery via the ```--config-path``` argument.
3. Ensure that inbound connectivity is allowed on the port specified in periphery.config.toml (default 8000).
@@ -21,36 +14,30 @@ The easiest way to do this is to follow the [monitor guide](https://github.com/m
5. Start the periphery binary with your preferred process manager, like systemd. The config read from the file is printed on startup, ensure that it is as expected.
## example periphery start command
### example periphery start command
```
periphery \
--config-path /path/to/periphery.config.base.toml \
--config-path /other_path/to/periphery.config.overide.toml \
--config-path /other_path/to/overide-periphery-config-directory \
--config-keyword periphery \
--config-keyword config \
--merge-nested-config \
--home_dir /home/username
```
## passing config files
### passing config files
when you pass multiple config files, later --config-path given in the command will always overide previous ones.
Either file paths or directory paths can be passed to ```--config-path```.
there are two ways to merge config files. The default behavior is to completely replace any base fields with whatever fields are present in the overide config. So if you pass ```allowed_ips = []``` in your overide config, the final allowed_ips will be an empty list as well.
When using directories, the file entries can be filtered by name with the ```--config-keyword``` argument, which can be passed multiple times to add more keywords. If passed, then only config files with file names that contain all keywords will be merged.
When passing multiple config files, later --config-path given in the command will always overide previous ones. Directory config files are merged in alphabetical order by name, so ```config_b.toml``` will overide ```config_a.toml```.
There are two ways to merge config files. The default behavior is to completely replace any base fields with whatever fields are present in the overide config. So if you pass ```allowed_ips = []``` in your overide config, the final allowed_ips will be an empty list as well.
```--merge-nested-config``` will merge config fields recursively and extend config array fields.
For example, with ```--merge-nested-config``` you can specify an allowed ip in the base config, and another in the overide config, they will both be present in the final config.
Similarly, you can specify a base docker / github account pair, and extend them with additional accounts in the overide config.
## adding the server to monitor
The easiest way to add the server is with the GUI. On the home page, click the + button to the right of the server search bar, configure the name and address of the server. The address is the full http/s url to the periphery server, eg http://12.34.56.78:8000.
Once it is added, you can use access the GUI to modify some config, like the alerting thresholds for cpu, memory and disk usage. A server can also be temporarily disabled, this will prevent alerting if it goes offline.
Since no state is stored on the periphery servers, you can easily redirect all deployments to be hosted on a different server. Just update the address to point to the new server.
[next: building](https://github.com/mbecker20/monitor/blob/main/docs/builds.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)
Similarly, you can specify a base docker / github account pair, and extend them with additional accounts in the overide config.

View File

@@ -0,0 +1,40 @@
# core setup
setting up monitor core is fairly simple. there are some requirements to run monitor core:
- a valid configuration file
- an instance of MongoDB to which monitor core can connect
- docker must be installed on the host
## 1. create the configuration file
create a configuration file on the system, for example at `~/.monitor/core.config.toml`, and copy the [example config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml). fill in all the necessary information before continuing.
:::note
to enable OAuth2 login, you must create a client on the respective OAuth provider,
for example [google](https://developers.google.com/identity/protocols/oauth2)
or [github](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps).
monitor uses the `web application` login flow.
the redirect uri is `<base_url>/auth/google/callback` for google and `<base_url>/auth/github/callback` for github.
:::
## 2. start monitor core
monitor core is distributed via dockerhub under the public repo [mbecker2020/monitor_core](https://hub.docker.com/r/mbecker2020/monitor_core).
```sh
docker run -d --name monitor-core \
-v $HOME/.monitor/core.config.toml:/config/config.toml \
-p 9000:9000 \
mbecker2020/monitor_core
```
## first login
monitor core should now be accessible on the specified port, so navigating to `http://<address>:<port>` will display the login page.
the first user to log in will be auto enabled and made admin. any additional users who create accounts will be disabled by default.
## https
monitor core itself only supports http, so a reverse proxy like [caddy](https://caddyserver.com/) should be used for https

View File

@@ -1,12 +1,8 @@
# deploying applications
# configuration
Monitor can deploy any docker images that it can access with the configured docker accounts. It works by parsing the deployment configuration into a ```docker run``` command. The configuration is stored on MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well.
## choose the docker image
Deployment configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](https://github.com/mbecker20/monitor/blob/main/docs/paths.md).
## configuring the image
There are two options to configure the deployed image.
There are two options to configure the docker image to deploy.
### attaching a monitor build
If the software you want to deploy is built by monitor, you can attach the build directly to the deployment.
@@ -84,20 +80,4 @@ Sometimes you need to specify some flags to be passed directly to the applicatio
docker run -d --name mongo-db mongo:6.0.3 --quiet
```
In order to achieve this with monitor, just pass ```--quiet``` to 'post image'.
## container lifetime management
The lifetime of a docker container is more like a virtual machine. They can be created, started, stopped, and destroyed. The lifetime management actions monitor presents to the user is relative to the containers state. For example, when the container is ```running```, you can either stop it, destroy it, or redeploy it.
### stopping a container
Sometimes you want to stop a running application but preserve its logs and configuration, either to be restarted later or to view the logs at a later time. It is more like *pausing* the application with its current config, as no configuration (like environment variable, volume mounts, etc.) will be changed when the container is started again. In order to restart an application with updated configuration, it must be *redeployed*.
### container redeploy
redeploying is the action of destroying a container and recreating it. If you update deployment config, these changes will not take effect until the container is redeployed. Just note this will destroy the previous containers logs along with the container itself.
[next: permissions](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)
In order to achieve this with monitor, just pass ```--quiet``` to 'post image'.

View File

@@ -0,0 +1,11 @@
# deploy containers
Monitor can deploy any docker images that it can access with the configured docker accounts. It works by parsing the deployment configuration into a ```docker run``` command, which is then run on the target system. The configuration is stored on MongoDB, and records of all actions (update config, deploy, stop, etc.) are stored as well.
Deployment configuration involves passing file / directory paths, for more details about passing file paths, see the [file paths doc](/file-paths).
```mdx-code-block
import DocCardList from '@theme/DocCardList';
<DocCardList />
```

View File

@@ -0,0 +1,15 @@
# container management
the lifetime of a docker container is more like that of a virtual machine. They can be created, started, stopped, and destroyed. monitor will display the state of the container and provides an API to manage all your containers' lifetimes.
this is achieved internally by running the appropriate docker command for the requested action (docker stop, docker start, etc).
### stopping a container
sometimes you want to stop a running application but preserve its logs and configuration, either to be restarted later or to view the logs at a later time. It is more like *pausing* the application with its current config, as no configuration (like environment variable, volume mounts, etc.) will be changed when the container is started again.
note that in order to restart an application with updated configuration, it must be *redeployed*. stopping and starting a container again will keep all configuration as it was when the container was initially created.
### container redeploy
redeploying is the action of destroying a container and recreating it. If you update deployment config, these changes will not take effect until the container is redeployed. Just note this will destroy the previous containers logs along with the container itself.

View File

@@ -1,10 +1,10 @@
# File Paths
# file paths
When working with monitor, you might have to configure file or directory paths.
when working with monitor, you might have to configure file or directory paths.
## Relative Paths
## relative paths
Where possible, it is better to use relative file paths. Using relative file paths removes the connection between the process being run and the particular server it runs one, making it easier to move things between servers.
Where possible, it is better to use relative file paths. Using relative file paths removes the connection between the process being run and the particular server it runs on, making it easier to move things between servers.
Where you see relative paths:
@@ -20,19 +20,12 @@ There are 3 kinds of paths to pass:
1. to specify the root of the repo, use ```.``` as the path
2. to specify a folder in the repo, pass it with **no** preceding ```/```. For example, ```example_folder``` or ```folder1/folder2```
3. to specify an absolute path on the servers filesystem, use a preceding slash, eg. ```/home/ubuntu/example```. This way should only be used if absolutely necessary.
3. to specify an absolute path on the servers filesystem, use a preceding slash, eg. ```/home/ubuntu/example```. This way should only be used if absolutely necessary, like when passing host paths when configuring docker volumes.
### Implementation
### implementation
relative file paths are joined with the path of the repo on the system using a Rust [PathBuf](https://doc.rust-lang.org/std/path/struct.PathBuf.html#method.push).
## Docker Volume Paths
These are passed directly to the Docker CLI using ```--volume /path/on/system:/path/in/container```. So for these, the same rules apply as when using Docker on the command line. Paths here should be given as absolute, don't use ```~``` or even ```$HOME```.

View File

@@ -1,6 +1,8 @@
---
slug: /intro
---
# introduction
# what is monitor?
If you have many servers running many applications, it can be a challenge to keep things organized and easily accessible. Without structure, things can become messy quickly, which means operational issues are more likely to arise and they can take longer to resolve. Ultimately these issues hinder productivity and waste valuable time. Monitor is a web app to provide this structure for how applications are built, deployed, and managed across many servers.
@@ -39,8 +41,4 @@ Monitor exposes powerful functionality over the core's REST API, enabling infras
Monitor is a system designed to be used by many users, whether they are developers, operations personnel, or administrators. The ability to affect an applications state is very powerful, so monitor has a granular permissioning system to only provide this functionality to the intended users. The permissioning system is explained in detail in the [permissioning](https://github.com/mbecker20/monitor/blob/main/docs/permissions.md) section.
User sign-on is possible using username / password, or with Oauth (Github and Google). Allowed login methods can be configured from the [core config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml).
[next: connecting servers](https://github.com/mbecker20/monitor/blob/main/docs/servers.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)
User sign-on is possible using username / password, or with Oauth (Github and Google). Allowed login methods can be configured from the [core config](https://github.com/mbecker20/monitor/blob/main/config_example/core.config.example.toml).

View File

@@ -25,8 +25,4 @@ Users also have some configurable global permissions, these are:
- create server permission
- create build permission
Only users with these permissions (as well as admins) can add additional servers to monitor, and can create additional builds, respectively.
[next: core setup](https://github.com/mbecker20/monitor/blob/main/docs/setup.md)
[back to table of contents](https://github.com/mbecker20/monitor/blob/main/readme.md)
Only users with these permissions (as well as admins) can add additional servers to monitor, and can create additional builds, respectively.

View File

@@ -0,0 +1,99 @@
// @ts-check
// Note: type annotations allow type checking and IDEs autocompletion
// Docusaurus site configuration for the monitor documentation site,
// deployed to GitHub Pages under mbecker20.github.io/monitor.
const lightCodeTheme = require('prism-react-renderer/themes/github');
const darkCodeTheme = require('prism-react-renderer/themes/dracula');
/** @type {import('@docusaurus/types').Config} */
const config = {
title: "monitor",
tagline: "distributed build and deployment system",
favicon: "img/favicon.ico",
// Set the production url of your site here
url: "https://mbecker20.github.io",
// Set the /<baseUrl>/ pathname under which your site is served
// For GitHub pages deployment, it is often '/<projectName>/'
baseUrl: "/monitor/",
// baseUrl: "/",
// GitHub pages deployment config.
// If you aren't using GitHub pages, you don't need these.
organizationName: "mbecker20", // Usually your GitHub org/user name.
projectName: "monitor", // Usually your repo name.
trailingSlash: false,
// Branch that `docusaurus deploy` pushes the generated static site to.
deploymentBranch: "gh-pages-docs",
// Fail the build on broken internal links; only warn on broken markdown links.
onBrokenLinks: "throw",
onBrokenMarkdownLinks: "warn",
// Even if you don't use internalization, you can use this field to set useful
// metadata like html lang. For example, if your site is Chinese, you may want
// to replace "en" with "zh-Hans".
i18n: {
defaultLocale: "en",
locales: ["en"],
},
presets: [
[
"classic",
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
// Serve docs at the site root rather than under /docs.
routeBasePath: "/",
sidebarPath: require.resolve("./sidebars.js"),
// Please change this to your repo.
// Remove this to remove the "edit this page" links.
editUrl: "https://github.com/mbecker20/monitor/tree/main/docsite",
},
theme: {
customCss: require.resolve("./src/css/custom.css"),
},
}),
],
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
// Replace with your project's social card
image: "img/monitor-lizard.png",
docs: {
sidebar: {
// Collapse sibling sidebar categories when one is expanded.
autoCollapseCategories: true,
}
},
navbar: {
title: "monitor",
logo: {
alt: "monitor lizard",
src: "img/monitor-lizard.png",
},
items: [
{
type: "docSidebar",
sidebarId: "docs",
position: "left",
label: "docs",
},
{
href: "https://github.com/mbecker20/monitor",
label: "GitHub",
position: "right",
},
],
},
footer: {
style: "dark",
copyright: `Built with Docusaurus`,
},
prism: {
theme: lightCodeTheme,
darkTheme: darkCodeTheme,
},
}),
};
module.exports = config;

38
docsite/package.json Normal file
View File

@@ -0,0 +1,38 @@
{
"name": "docsite",
"version": "0.0.0",
"private": true,
"scripts": {
"start": "docusaurus start",
"deploy": "GIT_USER=mbecker20 docusaurus deploy"
},
"dependencies": {
"@docusaurus/core": "2.4.0",
"@docusaurus/preset-classic": "2.4.0",
"@mdx-js/react": "^1.6.22",
"clsx": "^1.2.1",
"prism-react-renderer": "^1.3.5",
"react": "^17.0.2",
"react-dom": "^17.0.2"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "2.4.0",
"@tsconfig/docusaurus": "^1.0.5",
"typescript": "^4.7.4"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
},
"engines": {
"node": ">=16.14"
}
}

87
docsite/sidebars.js Normal file
View File

@@ -0,0 +1,87 @@
/**
* Creating a sidebar enables you to:
- create an ordered group of docs
- render a sidebar for each doc of that group
- provide next/previous navigation
The sidebars can be generated from the filesystem, or explicitly defined here.
Create as many sidebars as you want.
*/
// @ts-check
/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
const sidebars = {
// By default, Docusaurus generates a sidebar from the docs folder structure
// docsSidebar: [{type: 'autogenerated', dirName: '.'}],
// But you can create a sidebar manually
// Single "docs" sidebar: an ordered mix of standalone doc ids and
// collapsible categories. Each category links to its index doc as a
// landing page, with the remaining doc ids listed as its items.
docs: [
"intro",
"core-setup",
{
type: "category",
label: "connecting servers",
link: {
type: "doc",
id: "connecting-servers/index",
},
items: [
"connecting-servers/setup-periphery",
"connecting-servers/add-server",
],
},
{
type: "category",
label: "build images",
link: {
type: "doc",
id: "build-images/index",
},
items: [
"build-images/configuration",
"build-images/pre-build",
"build-images/choosing-builder",
"build-images/versioning",
],
},
{
type: "category",
label: "deploy containers",
link: {
type: "doc",
id: "deploy-containers/index",
},
items: [
"deploy-containers/configuration",
"deploy-containers/lifetime-management",
// "deploy-containers/choosing-builder",
// "deploy-containers/versioning",
],
},
"permissioning",
"file-paths",
{
type: "category",
label: "API",
link: {
type: "doc",
id: "api/index",
},
items: [
"api/types",
"api/authenticating-requests",
"api/login",
"api/api-secrets",
"api/build",
"api/deployment",
"api/server",
"api/permissions",
"api/websocket",
],
},
],
};
module.exports = sidebars;

View File

@@ -0,0 +1,15 @@
import React from "react";

// Thin, slightly transparent horizontal rule used to visually separate
// sections in the API reference pages (rendered from mdx-code-block).
const dividerStyle = {
  opacity: 0.7,
  backgroundColor: "rgb(175, 175, 175)",
  height: "3px",
  width: "100%",
  margin: "75px 0px",
};

export default function Divider() {
  return <div style={dividerStyle} />;
}

View File

@@ -0,0 +1,67 @@
import React from 'react';
import clsx from 'clsx';
import styles from './styles.module.css';
// Shape of a single entry in the homepage feature grid.
type FeatureItem = {
title: string;
// Svg: React.ComponentType<React.ComponentProps<'svg'>>;
description: JSX.Element;
};
// The three feature blurbs rendered under the hero banner on the landing page.
const FeatureList: FeatureItem[] = [
{
title: 'automated builds 🛠️',
// Svg: require('@site/static/img/undraw_docusaurus_mountain.svg').default,
description: (
<>
build auto versioned docker images from github repos, trigger builds on git push
</>
),
},
{
title: 'deploy docker containers 🚀',
// Svg: require('@site/static/img/undraw_docusaurus_tree.svg').default,
description: (
<>
deploy your builds (or any docker image), see uptime and logs across all your servers
</>
),
},
{
title: 'powered by Rust 🦀',
// Svg: require('@site/static/img/undraw_docusaurus_react.svg').default,
description: (
<>
The core API and periphery client are written in Rust
</>
),
},
];
// Renders one feature card; col--4 makes each card one third of the row.
function Feature({ title, description }: FeatureItem) {
return (
<div className={clsx('col col--4')}>
{/* <div className="text--center">
<Svg className={styles.featureSvg} role="img" />
</div> */}
<div className="text--center padding-horiz--md">
<h3>{title}</h3>
<p>{description}</p>
</div>
</div>
);
}
// Feature grid section displayed on the landing page, below the hero header.
export default function HomepageFeatures(): JSX.Element {
return (
<section className={styles.features}>
<div className="container">
<div className="row">
{FeatureList.map((props, idx) => (
<Feature key={idx} {...props} />
))}
</div>
</div>
</section>
);
}

View File

@@ -0,0 +1,11 @@
/* Flex container for the homepage feature section (three cards in a row). */
.features {
display: flex;
align-items: center;
padding: 4rem 0;
width: 100%;
}
/* Fixed-size slot for the (currently commented-out) feature illustration SVGs. */
.featureSvg {
height: 200px;
width: 200px;
}

View File

@@ -0,0 +1,11 @@
import React from "react";

// Monitor lizard logo image. `width` accepts any CSS width value
// (default "4rem"); the height scales automatically to keep aspect ratio.
export default function MonitorLogo({ width = "4rem" }) {
  const imgStyle = { width, height: "auto", opacity: 0.7 };
  return (
    <img style={imgStyle} src="img/monitor-lizard.png" alt="monitor-lizard" />
  );
}

View File

@@ -0,0 +1,13 @@
import React from "react";

// Horizontally centered screenshot of the monitor summary page.
export default function SummaryImg() {
  const wrapperStyle = { display: "flex", justifyContent: "center" };
  const imgStyle = { marginBottom: "4rem", width: "1000px" };
  return (
    <div style={wrapperStyle}>
      <img
        style={imgStyle}
        src="img/monitor-summary.png"
        alt="monitor-summary"
      />
    </div>
  );
}

View File

@@ -0,0 +1,30 @@
/**
* Any CSS included here will be global. The classic template
* bundles Infima by default. Infima is a CSS framework designed to
* work well for content-centric websites.
*/
/* You can override the default Infima variables here. */
/* Light mode: green primary palette. */
:root {
--ifm-color-primary: #2e8555;
--ifm-color-primary-dark: #29784c;
--ifm-color-primary-darker: #277148;
--ifm-color-primary-darkest: #205d3b;
--ifm-color-primary-light: #33925d;
--ifm-color-primary-lighter: #359962;
--ifm-color-primary-lightest: #3cad6e;
--ifm-code-font-size: 95%;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
--ifm-color-primary: #25c2a0;
--ifm-color-primary-dark: #21af90;
--ifm-color-primary-darker: #1fa588;
--ifm-color-primary-darkest: #1a8870;
--ifm-color-primary-light: #29d5b0;
--ifm-color-primary-lighter: #32d8b4;
--ifm-color-primary-lightest: #4fddbf;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}

View File

@@ -0,0 +1,24 @@
/**
* CSS files with the .module.css suffix will be treated as CSS modules
* and scoped locally.
*/
/* Full-width hero banner at the top of the landing page. */
.heroBanner {
padding: 4rem 0;
text-align: center;
position: relative;
overflow: hidden;
}
/* Tighten the banner padding on narrow (mobile/tablet) viewports. */
@media screen and (max-width: 996px) {
.heroBanner {
padding: 2rem;
}
}
/* 2-column grid of hero buttons; the screenshots button spans both columns. */
.buttons {
display: grid;
gap: 1rem;
grid-template-columns: 1fr 1fr;
width: fit-content;
}

View File

@@ -0,0 +1,76 @@
import React from 'react';
import clsx from 'clsx';
import Link from '@docusaurus/Link';
import useDocusaurusContext from '@docusaurus/useDocusaurusContext';
import Layout from '@theme/Layout';
import HomepageFeatures from '@site/src/components/HomepageFeatures';
import styles from './index.module.css';
import SummaryImg from '../components/SummaryImg';
import MonitorLogo from '../components/MonitorLogo';
// Hero section of the landing page: the lizard logo with the site title
// absolutely positioned over it, the tagline, and three navigation buttons
// (docs, github, screenshots — the last spanning both grid columns).
function HomepageHeader() {
const {siteConfig} = useDocusaurusContext();
return (
<header className={clsx("hero hero--primary", styles.heroBanner)}>
<div className="container">
<div style={{ display: "flex", gap: "1rem", justifyContent: "center" }}>
<div style={{ position: "relative" }}>
<MonitorLogo width="600px" />
<h1
className="hero__title"
style={{
margin: 0,
position: "absolute",
top: "40%",
left: "50%",
transform: "translate(-50%, -50%)",
}}
>
monitor
</h1>
</div>
</div>
<p className="hero__subtitle">{siteConfig.tagline}</p>
<div style={{ display: "flex", justifyContent: "center" }}>
<div className={styles.buttons}>
<Link className="button button--secondary button--lg" to="/intro">
docs
</Link>
<Link
className="button button--secondary button--lg"
to="https://github.com/mbecker20/monitor"
>
github
</Link>
<Link
className="button button--secondary button--lg"
to="https://github.com/mbecker20/monitor#readme"
style={{
width: "100%",
boxSizing: "border-box",
gridColumn: "span 2",
}}
>
screenshots
</Link>
</div>
</div>
</div>
</header>
);
}
// Landing page: hero header followed by the feature grid.
// The SummaryImg screenshot is currently disabled in both positions.
export default function Home(): JSX.Element {
const {siteConfig} = useDocusaurusContext();
return (
<Layout title="monitor docs" description={siteConfig.tagline}>
{/* <SummaryImg /> */}
<HomepageHeader />
<main>
<HomepageFeatures />
{/* <SummaryImg /> */}
</main>
</Layout>
);
}

0
docsite/static/.nojekyll Normal file
View File

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 6.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 69 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 117 KiB

7
docsite/tsconfig.json Normal file
View File

@@ -0,0 +1,7 @@
{
// This file is not used in compilation. It is here just for a nice editor experience.
"extends": "@tsconfig/docusaurus/tsconfig.json",
"compilerOptions": {
"baseUrl": "."
}
}

7617
docsite/yarn.lock Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -11,6 +11,7 @@
},
"license": "GPL v3.0",
"devDependencies": {
"@types/sanitize-html": "^2.9.0",
"sass": "^1.57.1",
"typescript": "^4.9.4",
"vite": "^4.0.3",
@@ -19,10 +20,12 @@
"dependencies": {
"@solidjs/router": "^0.6.0",
"@tanstack/solid-query": "^4.26.0",
"ansi-to-html": "^0.7.2",
"axios": "^1.2.1",
"js-file-download": "^0.4.12",
"lightweight-charts": "^3.8.0",
"reconnecting-websocket": "^4.4.0",
"sanitize-html": "^2.10.0",
"solid-js": "^1.6.6"
}
}

View File

@@ -14,7 +14,11 @@ export const NewGroup: Component<{}> = (p) => {
<Show
when={showNew()}
fallback={
<button class="green" onClick={toggleShowNew} style={{ width: "100%" }}>
<button
class="green"
onClick={toggleShowNew}
style={{ height: "100%" }}
>
<Icon type="plus" />
</button>
}
@@ -33,8 +37,12 @@ export const NewDeployment: Component<{ serverID: string }> = (p) => {
<Show
when={showNew()}
fallback={
<button class="green" onClick={toggleShowNew} style={{ width: "100%" }}>
<Icon type="plus" />
<button
class="green"
onClick={toggleShowNew}
style={{ width: "100%", height: "fit-content" }}
>
<Icon type="plus" width="1.2rem" />
</button>
}
>
@@ -56,8 +64,12 @@ export const NewBuild: Component<{}> = (p) => {
<Show
when={showNew()}
fallback={
<button class="green" onClick={toggleShowNew} style={{ width: "100%" }}>
<Icon type="plus" />
<button
class="green"
onClick={toggleShowNew}
style={{ width: "100%", height: "fit-content" }}
>
<Icon type="plus" width="1.2rem" />
</button>
}
>
@@ -87,25 +99,19 @@ const New: Component<{
}
};
return (
<Flex justifyContent="space-between">
<Flex justifyContent="space-between" style={{ height: "fit-content", width: "100%" }}>
<Input
ref={inputRef}
placeholder={p.placeholder}
value={name()}
onEdit={setName}
onEnter={create}
style={{ width: "20rem" }}
style={{ width: "100%", "min-width": "20rem" }}
/>
<Flex gap="0.4rem">
<button class="green" onClick={create}>
create
</button>
{/* <ConfirmButton
class="green"
onConfirm={create}
>
create
</ConfirmButton> */}
<button class="red" onClick={p.close}>
<Icon type="cross" />
</button>

View File

@@ -11,7 +11,11 @@ import { OPERATIONS } from "..";
import { useAppDimensions } from "../state/DimensionProvider";
import { useAppState } from "../state/StateProvider";
import { Operation, Update as UpdateType, UpdateStatus } from "../types";
import { readableMonitorTimestamp, readableVersion } from "../util/helpers";
import {
getId,
readableMonitorTimestamp,
readableVersion,
} from "../util/helpers";
import Icon from "./shared/Icon";
import Input from "./shared/Input";
import Flex from "./shared/layout/Flex";
@@ -40,10 +44,17 @@ const Updates: Component<{}> = (p) => {
if (username?.includes(search())) return true;
});
});
const [openMenu, setOpenMenu] = createSignal<string | undefined>(undefined);
return (
<Grid class="full-width card shadow">
<Flex alignItems="center" justifyContent="space-between">
<h1>updates</h1>
<Flex>
<h1>updates</h1>
<UpdateMenu
update={openMenu() ? updates.get(openMenu()!) : undefined}
closeMenu={() => setOpenMenu(undefined)}
/>
</Flex>
<Flex alignItems="center">
<Input class="lightgrey" placeholder="search" onEdit={setSearch} />
<Selector
@@ -73,7 +84,12 @@ const Updates: Component<{}> = (p) => {
}
>
<For each={filtered_updates()}>
{(update) => <Update update={update} />}
{(update) => (
<Update
update={update}
openMenu={() => setOpenMenu(getId(update))}
/>
)}
</For>
<Show when={!updates.noMore()}>
<button
@@ -94,7 +110,7 @@ const Updates: Component<{}> = (p) => {
export default Updates;
const Update: Component<{ update: UpdateType }> = (p) => {
const Update: Component<{ update: UpdateType; openMenu: () => void }> = (p) => {
const { isMobile } = useAppDimensions();
const { usernames, name_from_update_target } = useAppState();
const name = () => name_from_update_target(p.update.target);
@@ -113,9 +129,10 @@ const Update: Component<{ update: UpdateType }> = (p) => {
};
return (
<Flex
class="card light shadow wrap"
class="card light hover shadow wrap pointer"
justifyContent="space-between"
alignItems="center"
onClick={p.openMenu}
>
<Flex
alignItems="center"
@@ -149,7 +166,9 @@ const Update: Component<{ update: UpdateType }> = (p) => {
<div style={{ "place-self": "center end" }}>
{readableMonitorTimestamp(p.update.start_ts)}
</div>
<UpdateMenu update={p.update} />
{/* <button class="blue" onClick={p.openMenu}>
<Icon type="console" />
</button> */}
</Flex>
</Flex>
</Flex>

View File

@@ -5,13 +5,13 @@ import { useAppState } from "../../state/StateProvider";
import { useUser } from "../../state/UserProvider";
import { PermissionLevel } from "../../types";
import { getId } from "../../util/helpers";
import Flex from "../shared/layout/Flex";
import Grid from "../shared/layout/Grid";
import Flex from "../shared/layout/Flex";
const Resources: Component<{}> = (p) => {
const { user, user_id } = useUser();
const { user_id } = useUser();
const { isMobile } = useAppDimensions();
const { builds, deployments, servers } = useAppState();
const { builds, deployments, servers, groups } = useAppState();
const [search, setSearch] = createSignal("");
const _servers = createMemo(() => {
return servers.filterArray((s) => {
@@ -34,6 +34,13 @@ const Resources: Component<{}> = (p) => {
return p ? p !== PermissionLevel.None : false;
});
});
const _groups = createMemo(() => {
return groups.filterArray((b) => {
if (!b.name.includes(search())) return false;
const p = b.permissions?.[user_id()];
return p ? p !== PermissionLevel.None : false;
});
});
return (
<>
<Grid
@@ -55,7 +62,9 @@ const Resources: Component<{}> = (p) => {
>
<Grid gap="0.25rem">
<h2>{item.server.name}</h2>
<div class="dimmed">{item.server.region || "unknown region"}</div>
<div class="dimmed">
{item.server.region || "unknown region"}
</div>
</Grid>
<div>{item.server.permissions?.[user_id()] || "none"}</div>
</A>
@@ -117,6 +126,27 @@ const Resources: Component<{}> = (p) => {
</For>
</Grid>
</Grid>
<Grid
class="card shadow"
style={{ width: "100%", "box-sizing": "border-box" }}
>
<h1>groups</h1>
<Grid gridTemplateColumns={isMobile() ? undefined : "1fr 1fr"}>
<For each={_groups()}>
{(item) => (
<Flex
class="card light shadow hover full-width"
style={{
"justify-content": "space-between",
}}
>
<h2>{item.name}</h2>
<div>{item.permissions?.[user_id()] || "none"}</div>
</Flex>
)}
</For>
</Grid>
</Grid>
</>
);
};

View File

@@ -33,12 +33,15 @@ const Build: Component<{}> = (p) => {
}
});
});
onCleanup(() => unsub);
onCleanup(() => unsub());
const userCanUpdate = () =>
user().admin ||
build()?.permissions![user_id()] === PermissionLevel.Update;
return (
<Show when={build()} fallback={<NotFound type="build" loaded={builds.loaded()} />}>
<Show
when={build()}
fallback={<NotFound type="build" loaded={builds.loaded()} />}
>
<ActionStateProvider build_id={params.id}>
<Grid
style={{
@@ -46,12 +49,12 @@ const Build: Component<{}> = (p) => {
"box-sizing": "border-box",
}}
>
<Header />
<Grid
style={{ width: "100%" }}
gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}
>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto auto 1fr" }}>
<Header />
<Grid gridTemplateRows="auto 1fr" style={{ "flex-grow": 1 }}>
<Description
target={{ type: "Build", id: params.id }}
name={build()?.name!}

View File

@@ -1,6 +1,7 @@
import {
Component,
createEffect,
createSignal,
For,
onCleanup,
Show,
@@ -9,14 +10,17 @@ import { useUpdates } from "../../state/hooks";
import { useAppState } from "../../state/StateProvider";
import Update from "../update/Update";
import Grid from "../shared/layout/Grid";
import { combineClasses } from "../../util/helpers";
import { combineClasses, getId } from "../../util/helpers";
import { useParams } from "@solidjs/router";
import Flex from "../shared/layout/Flex";
import UpdateMenu from "../update/UpdateMenu";
import Loading from "../shared/loading/Loading";
const Updates: Component<{}> = (p) => {
const { ws } = useAppState();
const params = useParams();
const updates = useUpdates({ type: "Build", id: params.id });
const [openMenu, setOpenMenu] = createSignal<string | undefined>(undefined);
let unsub = () => {};
createEffect(() => {
unsub();
@@ -26,27 +30,47 @@ const Updates: Component<{}> = (p) => {
}
});
});
onCleanup(() => unsub())
onCleanup(() => unsub());
return (
<Grid
class={combineClasses("card shadow")}
style={{ "min-width": "350px" }}
>
<h1>updates</h1>
<Grid class="updates-container scroller">
<For each={updates.collection()}>
{(update) => <Update update={update} />}
</For>
<Show when={!updates.noMore()}>
<button
class="grey"
style={{ width: "100%" }}
onClick={() => updates.loadMore()}
>
load more
</button>
</Show>
</Grid>
<Flex>
<h1>updates</h1>
<UpdateMenu
update={openMenu() ? updates.get(openMenu()!) : undefined}
closeMenu={() => setOpenMenu(undefined)}
/>
</Flex>
<Show
when={updates.loaded()}
fallback={
<Flex class="full-size" alignItems="center" justifyContent="center">
<Loading type="three-dot" scale={0.7} />
</Flex>
}
>
<Grid class="updates-container scroller">
<For each={updates.collection()}>
{(update) => (
<Update
update={update}
openMenu={() => setOpenMenu(getId(update))}
/>
)}
</For>
<Show when={!updates.noMore()}>
<button
class="grey"
style={{ width: "100%" }}
onClick={() => updates.loadMore()}
>
load more
</button>
</Show>
</Grid>
</Show>
</Grid>
);
};

View File

@@ -7,7 +7,7 @@ import {
useContext,
} from "solid-js";
import { createStore, SetStoreFunction } from "solid-js/store";
import { client } from "../../..";
import { client, pushNotification } from "../../..";
import { useAppState } from "../../../state/StateProvider";
import { useUser } from "../../../state/UserProvider";
import { Build, Operation, PermissionLevel, ServerWithStatus } from "../../../types";
@@ -55,10 +55,17 @@ export const ConfigProvider: ParentComponent<{}> = (p) => {
client.get_build(params.id).then((build) => {
set({
...build,
_id: { $oid: params.id } as any,
repo: build.repo,
branch: build.branch,
pre_build: build.pre_build,
docker_build_args: build.docker_build_args,
docker_build_args: {
build_path: build.docker_build_args?.build_path!,
dockerfile_path: build.docker_build_args?.dockerfile_path,
build_args: build.docker_build_args?.build_args,
extra_args: build.docker_build_args?.extra_args,
use_buildx: build.docker_build_args?.use_buildx,
},
docker_account: build.docker_account,
github_account: build.github_account,
aws_config: build.aws_config,
@@ -72,7 +79,13 @@ export const ConfigProvider: ParentComponent<{}> = (p) => {
const save = () => {
setBuild("saving", true);
client.update_build(build)
client
.update_build(build)
.catch((e) => {
console.error(e);
pushNotification("bad", "update build failed");
setBuild("saving", false);
});
};
let update_unsub = () => {};

View File

@@ -24,9 +24,11 @@ const BuildConfig: Component<{}> = (p) => {
<Repo />
<Docker />
<CliBuild />
<BuildArgs />
<ExtraArgs />
<UseBuildx />
<Show when={build.docker_build_args?.build_path}>
<BuildArgs />
<ExtraArgs />
<UseBuildx />
</Show>
<Show when={userCanUpdate()}>
<WebhookUrl />
</Show>

View File

@@ -9,7 +9,7 @@ const ExtraArgs: Component<{}> = (p) => {
const { build, setBuild, userCanUpdate } = useConfig();
const onAdd = () => {
setBuild("docker_build_args", "extra_args", (extra_args: any) => [
...extra_args,
...(extra_args || []),
"",
]);
};
@@ -28,7 +28,7 @@ const ExtraArgs: Component<{}> = (p) => {
</button>
</Show>
</Flex>
<For each={[...build.docker_build_args!.extra_args!.keys()]}>
<For each={[...(build.docker_build_args?.extra_args?.keys() || [])]}>
{(_, index) => (
<Flex
justifyContent={userCanUpdate() ? "space-between" : undefined}
@@ -37,7 +37,7 @@ const ExtraArgs: Component<{}> = (p) => {
>
<Input
placeholder="--extra-arg=value"
value={build.docker_build_args!.extra_args![index()]}
value={build.docker_build_args?.extra_args?.[index()] || ""}
style={{ width: "80%" }}
onEdit={(value) =>
setBuild("docker_build_args", "extra_args", index(), value)

View File

@@ -1,4 +1,12 @@
import { Component, Match, Show, Switch } from "solid-js";
import {
Component,
Match,
Setter,
Show,
Signal,
Switch,
createSignal,
} from "solid-js";
import { client } from "../..";
import { useAppState } from "../../state/StateProvider";
import { useUser } from "../../state/UserProvider";
@@ -13,10 +21,15 @@ import { combineClasses } from "../../util/helpers";
import { A, useParams } from "@solidjs/router";
import {
DockerContainerState,
Operation,
PermissionLevel,
ServerStatus,
TerminationSignal,
TerminationSignalLabel,
UpdateStatus,
} from "../../types";
import ConfirmMenuButton from "../shared/ConfirmMenuButton";
import Selector from "../shared/menu/Selector";
const Actions: Component<{}> = (p) => {
const { deployments, builds, servers, getPermissionOnDeployment } =
@@ -161,12 +174,12 @@ const Build: Component = () => {
};
const Deploy: Component<{ redeploy?: boolean }> = (p) => {
// const { deployments } = useAppState();
const params = useParams();
// const deployment = () => deployments.get(params.id)!;
const actions = useActionStates();
const { deployments } = useAppState();
const name = () => deployments.get(params.id)?.deployment.name;
const deployment = () => deployments.get(params.id);
const name = () => deployment()?.deployment.name;
const [termSignalLabel, setTermSignalLabel] = useTermSignalLabel();
return (
<Show
when={!actions.deploying}
@@ -194,9 +207,19 @@ const Deploy: Component<{ redeploy?: boolean }> = (p) => {
<ConfirmMenuButton
class="green"
onConfirm={() => {
client.deploy_container(params.id);
client.deploy_container(params.id, {
stop_signal: ((termSignalLabel().signal as any) === "default"
? undefined
: termSignalLabel().signal) as TerminationSignal,
});
}}
title="redeploy container"
configs={
<TermSignalSelector
termSignalLabel={termSignalLabel()}
setTermSignalLabel={setTermSignalLabel}
/>
}
match={name()!}
>
<Icon type={"reset"} />
@@ -216,6 +239,7 @@ const RemoveContainer = () => {
const actions = useActionStates();
const { deployments } = useAppState();
const name = () => deployments.get(params.id)?.deployment.name;
const [termSignalLabel, setTermSignalLabel] = useTermSignalLabel();
return (
<Show
when={!actions.removing}
@@ -230,9 +254,19 @@ const RemoveContainer = () => {
<ConfirmMenuButton
class="red"
onConfirm={() => {
client.remove_container(params.id);
client.remove_container(params.id, {
stop_signal: ((termSignalLabel().signal as any) === "default"
? undefined
: termSignalLabel().signal) as TerminationSignal,
});
}}
title="destroy container"
configs={
<TermSignalSelector
termSignalLabel={termSignalLabel()}
setTermSignalLabel={setTermSignalLabel}
/>
}
match={name()!}
>
<Icon type="trash" />
@@ -282,6 +316,7 @@ const Stop = () => {
const actions = useActionStates();
const { deployments } = useAppState();
const name = () => deployments.get(params.id)?.deployment.name;
const [termSignalLabel, setTermSignalLabel] = useTermSignalLabel();
return (
<Show
when={!actions.stopping}
@@ -296,9 +331,17 @@ const Stop = () => {
<ConfirmMenuButton
class="orange"
onConfirm={() => {
client.stop_container(params.id);
client.stop_container(params.id, {
stop_signal: termSignalLabel().signal,
});
}}
title="stop container"
configs={
<TermSignalSelector
termSignalLabel={termSignalLabel()}
setTermSignalLabel={setTermSignalLabel}
/>
}
match={name()!}
>
<Icon type="pause" />
@@ -374,4 +417,68 @@ const Reclone = () => {
);
};
const TermSignalSelector: Component<{
termSignalLabel: TerminationSignalLabel;
setTermSignalLabel: Setter<TerminationSignalLabel>;
}> = (p) => {
const params = useParams();
const { deployments } = useAppState();
const deployment = () => deployments.get(params.id);
return (
<Show
when={
deployment()?.state === DockerContainerState.Running &&
(deployment()?.deployment.term_signal_labels?.length || 0) > 1
}
>
<Flex
class="full-width wrap"
justifyContent="space-between"
alignItems="center"
>
<div class="dimmed">termination signal: </div>
<Selector
targetClass="blue"
selected={p.termSignalLabel}
items={deployment()?.deployment.term_signal_labels || []}
itemMap={({ signal, label }) => (
<Flex gap="0.5rem" alignItems="center">
<div>{signal}</div>
<Show when={label.length > 0}>
<div class="dimmed">{label}</div>
</Show>
</Flex>
)}
onSelect={(signal) => p.setTermSignalLabel(signal)}
position="bottom right"
/>
</Flex>
</Show>
);
};
function useTermSignalLabel(): Signal<TerminationSignalLabel> {
const params = useParams();
const { deployments, ws } = useAppState();
const deployment = () => deployments.get(params.id)?.deployment;
const term_signal = () =>
deployment()?.termination_signal || TerminationSignal.SigTerm;
const default_term_signal_label = () => ({
signal: term_signal(),
label:
deployment()?.term_signal_labels?.find(
({ signal }) => signal === term_signal()
)?.label || "",
});
const [label, setLabel] = createSignal<TerminationSignalLabel>(
default_term_signal_label()
);
ws.subscribe([Operation.UpdateDeployment], (update) => {
if (update.status === UpdateStatus.Complete) {
setTimeout(() => setLabel(default_term_signal_label()), 100);
}
});
return [label, setLabel];
}
export default Actions;

View File

@@ -3,7 +3,7 @@ import { Component, Show } from "solid-js";
import { useAppDimensions } from "../../state/DimensionProvider";
import { useAppState } from "../../state/StateProvider";
import { useUser } from "../../state/UserProvider";
import { PermissionLevel } from "../../types";
import { PermissionLevel, TerminationSignal } from "../../types";
import Description from "../Description";
import NotFound from "../NotFound";
import Grid from "../shared/layout/Grid";
@@ -16,6 +16,13 @@ import Updates from "./Updates";
const POLLING_RATE = 10000;
// let interval = -1;
export const TERM_SIGNALS = [
TerminationSignal.SigTerm,
TerminationSignal.SigInt,
TerminationSignal.SigQuit,
TerminationSignal.SigHup,
];
const Deployment: Component<{}> = (p) => {
const { user, user_id } = useUser();
const { servers, deployments } = useAppState();
@@ -47,12 +54,12 @@ const Deployment: Component<{}> = (p) => {
"box-sizing": "border-box",
}}
>
<Header />
<Grid
style={{ width: "100%" }}
gridTemplateColumns={isSemiMobile() ? "1fr" : "1fr 1fr"}
>
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto auto 1fr" }}>
<Header />
<Grid style={{ "flex-grow": 1, "grid-auto-rows": "auto 1fr" }}>
<Description
target={{ type: "Deployment", id: params.id }}
name={deployment()?.deployment.name!}

View File

@@ -2,7 +2,6 @@ import { Component, createResource, createSignal, Show } from "solid-js";
import { useAppState } from "../../state/StateProvider";
import { useUser } from "../../state/UserProvider";
import {
combineClasses,
deploymentHeaderStateClass,
getId,
readableVersion,
@@ -79,7 +78,7 @@ const Header: Component<{}> = (p) => {
<>
<Grid
gap="0.5rem"
class={combineClasses("card shadow")}
class="card shadow"
style={{
position: "relative",
cursor: isSemiMobile() ? "pointer" : undefined,
@@ -116,6 +115,7 @@ const Header: Component<{}> = (p) => {
setUpdatingName(false);
}}
onBlur={() => setEditingName(false)}
style={{ "font-size": "1.4rem" }}
/>
</Show>
</Show>
@@ -169,10 +169,10 @@ const Header: Component<{}> = (p) => {
class="text-hover"
style={{ opacity: 0.7, padding: 0 }}
>
{server()?.server.name}
{server()?.server.name || "unknown"}
</A>
<div class={deploymentHeaderStateClass(deployment().state)}>
{deployment().state}
{deployment().state.replaceAll("_", " ")}
</div>
</Flex>
<Show when={status()}>

View File

@@ -1,13 +1,14 @@
import { Component, createEffect, For, onCleanup, Show } from "solid-js";
import { Component, createEffect, createSignal, For, onCleanup, Show } from "solid-js";
import { useUpdates } from "../../state/hooks";
import Grid from "../shared/layout/Grid";
import Update from "../update/Update";
import { useAppState } from "../../state/StateProvider";
import { combineClasses } from "../../util/helpers";
import { combineClasses, getId } from "../../util/helpers";
import { Operation } from "../../types";
import Flex from "../shared/layout/Flex";
import Loading from "../shared/loading/Loading";
import { useParams } from "@solidjs/router";
import UpdateMenu from "../update/UpdateMenu";
const Updates: Component<{}> = (p) => {
const { ws, deployments } = useAppState();
@@ -15,6 +16,7 @@ const Updates: Component<{}> = (p) => {
const deployment = () => deployments.get(params.id)!
const updates = useUpdates({ type: "Deployment", id: params.id }, true);
const buildID = () => deployment()?.deployment.build_id;
const [openMenu, setOpenMenu] = createSignal<string | undefined>(undefined);
let unsub = () => {};
createEffect(() => {
unsub();
@@ -32,18 +34,29 @@ const Updates: Component<{}> = (p) => {
onCleanup(() => unsub());
return (
<Grid class={combineClasses("card shadow")} style={{ "flex-grow": 1 }}>
<h1>updates</h1>
<Flex>
<h1>updates</h1>
<UpdateMenu
update={openMenu() ? updates.get(openMenu()!) : undefined}
closeMenu={() => setOpenMenu(undefined)}
/>
</Flex>
<Show
when={updates.loaded()}
fallback={
<Flex justifyContent="center">
<Loading type="three-dot" />
<Flex class="full-size" alignItems="center" justifyContent="center">
<Loading type="three-dot" scale={0.7} />
</Flex>
}
>
<Grid class="updates-container scroller">
<For each={updates.collection()}>
{(update) => <Update update={update} />}
{(update) => (
<Update
update={update}
openMenu={() => setOpenMenu(getId(update))}
/>
)}
</For>
<Show when={!updates.noMore()}>
<button

View File

@@ -21,6 +21,8 @@ import { useAppDimensions } from "../../../../state/DimensionProvider";
import SimpleTabs from "../../../shared/tabs/SimpleTabs";
import ExtraArgs from "./container/ExtraArgs";
import WebhookUrl from "./container/WebhookUrl";
import RedeployOnBuild from "./container/RedeployOnBuild";
import TerminationSignals, { DefaultTerminationSignal, DefaultTerminationTimeout } from "./termination/TerminationSignals";
const Config: Component<{}> = () => {
const { deployment, reset, save, userCanUpdate } = useConfig();
@@ -47,6 +49,20 @@ const Config: Component<{}> = () => {
<Mounts />
<ExtraArgs />
<PostImage />
<RedeployOnBuild />
<Show when={isMobile()}>
<div style={{ height: "1rem" }} />
</Show>
</Grid>
),
},
{
title: "termination",
element: () => (
<Grid class="config-items" placeItems="start center" style={{ "margin-bottom": "" }}>
<TerminationSignals />
<DefaultTerminationSignal />
<DefaultTerminationTimeout />
<Show when={isMobile()}>
<div style={{ height: "1rem" }} />
</Show>

View File

@@ -62,6 +62,8 @@ export const ConfigProvider: ParentComponent<{}> = (p) => {
// console.log("loading deployment");
client.get_deployment(params.id).then((deployment) =>
set({
...deployment,
_id: { $oid: params.id } as any,
name: deployment.deployment.name,
server_id: deployment.deployment.server_id,
permissions: deployment.deployment.permissions,
@@ -87,6 +89,10 @@ export const ConfigProvider: ParentComponent<{}> = (p) => {
repo_mount: deployment.deployment.repo_mount,
created_at: deployment.deployment.created_at,
updated_at: deployment.deployment.updated_at,
redeploy_on_build: deployment.deployment.redeploy_on_build,
term_signal_labels: deployment.deployment.term_signal_labels,
termination_signal: deployment.deployment.termination_signal,
termination_timeout: deployment.deployment.termination_timeout,
loaded: true,
updated: false,
updating: false,
@@ -108,11 +114,13 @@ export const ConfigProvider: ParentComponent<{}> = (p) => {
const save = () => {
setDeployment("updating", true);
client.update_deployment(deployment).catch((e) => {
console.error(e);
pushNotification("bad", "update deployment failed");
setDeployment("updating", false);
});
client
.update_deployment(deployment)
.catch((e) => {
console.error(e);
pushNotification("bad", "update deployment failed");
setDeployment("updating", false);
});
};
let update_unsub = () => {};

Some files were not shown because too many files have changed in this diff Show More