This commit is contained in:
mbecker20
2024-08-15 03:24:52 -04:00
parent 955670d979
commit 5dfd007580
13 changed files with 44 additions and 50 deletions

View File

@@ -91,7 +91,11 @@ pub async fn get_stack_and_server(
Ok((stack, server))
}
/// Builds an anchored regex matching compose container names derived from
/// `container_name`: the exact name, optionally suffixed with a dash and a
/// replica number — e.g. `app`, `app-1`, `app-12`.
///
/// # Errors
/// Returns an error (with the offending pattern in the context) if the
/// formatted pattern fails to compile.
pub fn compose_container_match_regex(
  container_name: &str,
) -> anyhow::Result<Regex> {
  // NOTE(review): `container_name` is interpolated verbatim, so regex
  // metacharacters in the name would alter the pattern — confirm callers
  // only pass plain compose service / container names.
  let regex = format!("^{container_name}-?[0-9]*$");
  Regex::new(&regex).with_context(|| {
    format!("failed to construct valid regex from {regex}")
  })
}

View File

@@ -1,4 +1,7 @@
use std::{fs, path::{Path, PathBuf}};
use std::{
fs,
path::{Path, PathBuf},
};
use anyhow::{anyhow, Context};
use formatting::format_serror;

View File

@@ -44,7 +44,7 @@ pub async fn handle_build_webhook(
if request_branch != build.config.branch {
return Err(anyhow!("request branch does not match expected"));
}
let user = git_webhook_user().to_owned();
let req = ExecuteRequest::RunBuild(RunBuild { build: build_id });
let update = init_execution_update(&req, &user).await?;

View File

@@ -36,11 +36,9 @@ pub async fn alert_stacks(
if status.curr.state != prev {
// send alert
let Ok(stack) =
resource::get::<Stack>(&status.curr.id)
.await
.inspect_err(|e| {
error!("failed to get stack from db | {e:#?}")
})
resource::get::<Stack>(&status.curr.id).await.inspect_err(
|e| error!("failed to get stack from db | {e:#?}"),
)
else {
continue;
};

View File

@@ -788,9 +788,11 @@ where
ResourceTarget::Stack(id) => ("recents.Stack", id),
ResourceTarget::Builder(id) => ("recents.Builder", id),
ResourceTarget::Alerter(id) => ("recents.Alerter", id),
ResourceTarget::ServerTemplate(id) => ("recents.ServerTemplate", id),
ResourceTarget::ServerTemplate(id) => {
("recents.ServerTemplate", id)
}
ResourceTarget::ResourceSync(id) => ("recents.ResourceSync", id),
ResourceTarget::System(_) => return
ResourceTarget::System(_) => return,
};
if let Err(e) = db_client()
.await

View File

@@ -96,4 +96,4 @@ pub struct PruneImages {
/// Request to prune containers on the target server.
pub struct PruneContainers {
  /// Id or name
  pub server: String,
}

View File

@@ -92,7 +92,7 @@ pub type CreateDockerRegistryAccountResponse = DockerRegistryAccount;
/// Request to update the docker registry account identified by `id`
/// with the fields present in the partial.
pub struct UpdateDockerRegistryAccount {
  /// The id of the docker registry to update
  pub id: String,
  /// The partial docker registry account.
  pub account: _PartialDockerRegistryAccount,
}

View File

@@ -4,7 +4,9 @@ use serde::{Deserialize, Serialize};
use typeshare::typeshare;
use crate::entities::{
stack::{Stack, _PartialStackConfig}, update::Update, NoData
stack::{Stack, _PartialStackConfig},
update::Update,
NoData,
};
use super::MonitorWriteRequest;

View File

@@ -160,7 +160,6 @@ pub struct CoreConfig {
// ===========
// = General =
// ===========
/// The title of this monitor deployment. Will be used in the browser page title.
/// Default: 'Monitor'
#[serde(default = "default_title")]
@@ -188,7 +187,6 @@ pub struct CoreConfig {
// ============
// = Database =
// ============
/// Configure core mongo connection.
///
/// An easy deployment method is to use Mongo Atlas to provide
@@ -198,7 +196,6 @@ pub struct CoreConfig {
// ================
// = Auth / Login =
// ================
/// enable login with local auth
#[serde(default)]
pub local_auth: bool,
@@ -226,7 +223,6 @@ pub struct CoreConfig {
// =========
// = Oauth =
// =========
/// Configure google oauth
#[serde(default)]
pub google_oauth: OauthCredentials,
@@ -238,7 +234,6 @@ pub struct CoreConfig {
// ============
// = Webhooks =
// ============
/// Used to verify validity from webhooks.
/// Should be some secure hash maybe 20-40 chars.
/// It is given to git provider when configuring the webhook.
@@ -261,7 +256,6 @@ pub struct CoreConfig {
// ===========
// = Logging =
// ===========
/// Configure logging
#[serde(default)]
pub logging: LogConfig,
@@ -269,7 +263,6 @@ pub struct CoreConfig {
// ===========
// = Pruning =
// ===========
/// Number of days to keep stats, or 0 to disable pruning.
/// Stats older than this number of days are deleted on a daily cycle
/// Default: 14
@@ -285,7 +278,6 @@ pub struct CoreConfig {
// ==================
// = Poll Intervals =
// ==================
/// Interval at which to poll stacks for any updates / automated actions.
/// Options: `15-sec`, `1-min`, `5-min`, `15-min`, `1-hr`
/// Default: `5-min`.
@@ -318,7 +310,6 @@ pub struct CoreConfig {
// ===================
// = Cloud Providers =
// ===================
/// Configure AWS credentials to use with AWS builds / server launches.
#[serde(default)]
pub aws: AwsCredentials,
@@ -330,7 +321,6 @@ pub struct CoreConfig {
// =================
// = Git Providers =
// =================
/// Configure git credentials used to clone private repos.
/// Supports any git provider.
#[serde(default, alias = "git_provider")]
@@ -339,7 +329,6 @@ pub struct CoreConfig {
// ======================
// = Registry Providers =
// ======================
/// Configure docker credentials used to push / pull images.
/// Supports any docker image repository.
#[serde(default, alias = "docker_registry")]
@@ -352,7 +341,6 @@ pub struct CoreConfig {
// ===========
// = Secrets =
// ===========
/// Configure core-based secrets. These will be preferentially interpolated into
/// values if they contain a matching secret. Otherwise, the periphery will have to have the
/// secret configured.
@@ -362,7 +350,6 @@ pub struct CoreConfig {
// =========
// = Other =
// =========
/// Specify the directory used to clone stack / repo / build repos, for latest hash / contents.
/// The default is fine when using a container.
/// This directory has no need for persistence, so no need to mount it.

View File

@@ -288,18 +288,18 @@ fn default_stats_polling_rate() -> Timelength {
impl Default for PeripheryConfig {
  /// Default periphery configuration.
  fn default() -> Self {
    // Port, directories, and polling rate come from the module's
    // `default_*` helper fns; every other field falls back to its
    // type's own `Default` impl.
    Self {
      port: default_periphery_port(),
      repo_dir: default_repo_dir(),
      stack_dir: default_stack_dir(),
      stats_polling_rate: default_stats_polling_rate(),
      legacy_compose_cli: Default::default(),
      logging: Default::default(),
      allowed_ips: Default::default(),
      passkeys: Default::default(),
      secrets: Default::default(),
      git_providers: Default::default(),
      docker_registries: Default::default(),
    }
  }
}

View File

@@ -89,7 +89,7 @@ impl Default for ProcedureConfig {
Self {
stages: Default::default(),
webhook_enabled: default_webhook_enabled(),
webhook_secret: Default::default()
webhook_secret: Default::default(),
}
}
}

View File

@@ -10,9 +10,7 @@ pub type _PartialGitProviderAccount = PartialGitProviderAccount;
/// Configuration to access private git repos from various git providers.
/// Note. Cannot create two accounts with the same domain and username.
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Partial,
)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, Partial)]
#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)]
#[partial(skip_serializing_none, from, diff)]
#[cfg_attr(
@@ -66,9 +64,7 @@ pub type _PartialDockerRegistryAccount = PartialDockerRegistryAccount;
/// Configuration to access private image repositories on various registries.
#[typeshare]
#[derive(
Serialize, Deserialize, Debug, Clone, Default, Partial,
)]
#[derive(Serialize, Deserialize, Debug, Clone, Default, Partial)]
#[partial_derive(Serialize, Deserialize, Debug, Clone, Default)]
#[partial(skip_serializing_none, from, diff)]
#[cfg_attr(
@@ -108,4 +104,4 @@ pub struct DockerRegistryAccount {
fn default_registry_domain() -> String {
String::from("docker.io")
}
}

View File

@@ -1,5 +1,7 @@
use monitor_client::entities::{
stack::{ComposeContents, ComposeProject, Stack}, update::Log, SearchCombinator,
stack::{ComposeContents, ComposeProject, Stack},
update::Log,
SearchCombinator,
};
use resolver_api::derive::Request;
use serde::{Deserialize, Serialize};