Mirror of https://github.com/fosrl/pangolin.git, synced 2025-12-05 19:17:29 -06:00

Merge branch 'dev' into clients-user
@@ -31,6 +31,7 @@ proxy-resources:
 # - owen@pangolin.net
 # whitelist-users:
 # - owen@pangolin.net
+# auto-login-idp: 1
 headers:
   - name: X-Example-Header
     value: example-value
@@ -35,7 +35,7 @@ services:
     - 80:80 # Port for traefik because of the network_mode

   traefik:
-    image: traefik:v3.5
+    image: traefik:v3.6
     container_name: traefik
     restart: unless-stopped
     network_mode: service:gerbil # Ports appear on the gerbil service
@@ -52,4 +52,4 @@ networks:
 default:
     driver: bridge
     name: pangolin
-    enable_ipv6: true
+    enable_ipv6: true
@@ -35,7 +35,7 @@ services:
     - 80:80
{{end}}
   traefik:
-    image: docker.io/traefik:v3.5
+    image: docker.io/traefik:v3.6
     container_name: traefik
     restart: unless-stopped
{{if .InstallGerbil}}
@@ -59,4 +59,4 @@ networks:
 default:
     driver: bridge
     name: pangolin
-{{if .EnableIPv6}}    enable_ipv6: true{{end}}
+{{if .EnableIPv6}}    enable_ipv6: true{{end}}
@@ -1080,11 +1080,11 @@
     "actionDeleteIdpOrg": "IDP-Organisationsrichtlinie löschen",
     "actionListIdpOrgs": "IDP-Organisationen auflisten",
     "actionUpdateIdpOrg": "IDP-Organisation aktualisieren",
-    "actionCreateClient": "Client anlegen",
-    "actionDeleteClient": "Client löschen",
-    "actionUpdateClient": "Client aktualisieren",
+    "actionCreateClient": "Kunde erstellen",
+    "actionDeleteClient": "Kunde löschen",
+    "actionUpdateClient": "Kunde aktualisieren",
     "actionListClients": "Clients auflisten",
-    "actionGetClient": "Clients abrufen",
+    "actionGetClient": "Kunde holen",
     "actionCreateSiteResource": "Site-Ressource erstellen",
     "actionDeleteSiteResource": "Site-Ressource löschen",
     "actionGetSiteResource": "Site-Ressource abrufen",
@@ -1432,14 +1432,14 @@
     },
     "siteRequired": "Standort ist erforderlich.",
     "olmTunnel": "Olm-Tunnel",
-    "olmTunnelDescription": "Nutzen Sie Olm für die Kundenverbindung",
+    "olmTunnelDescription": "Nutzen Sie Olm für die Client-Verbindung",
     "errorCreatingClient": "Fehler beim Erstellen des Clients",
     "clientDefaultsNotFound": "Standardeinstellungen des Clients nicht gefunden",
     "createClient": "Client erstellen",
     "createClientDescription": "Erstellen Sie einen neuen Client für die Verbindung zu Ihren Standorten.",
     "seeAllClients": "Alle Clients anzeigen",
-    "clientInformation": "Client Informationen",
-    "clientNamePlaceholder": "Client Name",
+    "clientInformation": "Client-Informationen",
+    "clientNamePlaceholder": "Client-Name",
     "address": "Adresse",
     "subnetPlaceholder": "Subnetz",
     "addressDescription": "Die Adresse, die dieser Client für die Verbindung verwenden wird.",
@@ -2110,7 +2110,6 @@
     "selectedResources": "Ausgewählte Ressourcen",
     "enableSelected": "Ausgewählte aktivieren",
     "disableSelected": "Ausgewählte deaktivieren",
-    "checkSelectedStatus": "Status der Auswahl überprüfen",
     "credentials": "Zugangsdaten",
     "savecredentials": "Zugangsdaten speichern",
     "regeneratecredentials": "Re-Key",
@@ -2136,5 +2135,6 @@
     "niceIdUpdateErrorDescription": "Beim Aktualisieren der Nizza-ID ist ein Fehler aufgetreten.",
     "niceIdCannotBeEmpty": "Nizza-ID darf nicht leer sein",
     "enterIdentifier": "Identifikator eingeben",
-    "identifier": "Identifier"
+    "identifier": "Identifier",
+    "checkSelectedStatus": "Status der Auswahl überprüfen"
 }
@@ -528,6 +528,8 @@
     "targetCreatedDescription": "Target has been created successfully",
     "targetErrorCreate": "Failed to create target",
     "targetErrorCreateDescription": "An error occurred while creating the target",
+    "tlsServerName": "TLS Server Name",
+    "tlsServerNameDescription": "The TLS server name to use for SNI",
     "save": "Save",
     "proxyAdditional": "Additional Proxy Settings",
     "proxyAdditionalDescription": "Configure how the resource handles proxy settings",
@@ -2123,9 +2125,9 @@
     "olmUpdateAvailableInfo": "An updated version of Olm is available. Please update to the latest version for the best experience.",
     "client": "Client",
     "proxyProtocol": "Proxy Protocol Settings",
-    "proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP/UDP services.",
+    "proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP services.",
     "enableProxyProtocol": "Enable Proxy Protocol",
-    "proxyProtocolInfo": "Preserve client IP addresses for TCP/UDP backends",
+    "proxyProtocolInfo": "Preserve client IP addresses for TCP backends",
     "proxyProtocolVersion": "Proxy Protocol Version",
     "version1": " Version 1 (Recommended)",
     "version2": "Version 2",
messages/zh-TW.json (new file, 2099 lines). File diff suppressed because it is too large.
package-lock.json (generated, 1947 lines). File diff suppressed because it is too large.
@@ -86,7 +86,7 @@
     "eslint-config-next": "16.0.3",
     "express": "5.1.0",
     "express-rate-limit": "8.2.1",
-    "glob": "11.0.3",
+    "glob": "11.1.0",
     "helmet": "8.1.0",
     "http-errors": "2.0.0",
     "i": "^0.3.7",
@@ -105,7 +105,7 @@
     "node-cache": "5.1.2",
     "node-fetch": "3.3.2",
     "nodemailer": "7.0.10",
-    "npm": "^11.6.2",
+    "npm": "^11.6.4",
     "nprogress": "^0.2.0",
     "oslo": "1.2.1",
     "pg": "^8.16.2",
@@ -176,7 +176,8 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
     hcFollowRedirects: boolean("hcFollowRedirects").default(true),
     hcMethod: varchar("hcMethod").default("GET"),
     hcStatus: integer("hcStatus"), // http code
-    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
+    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
+    hcTlsServerName: text("hcTlsServerName"),
 });

 export const exitNodes = pgTable("exitNodes", {
@@ -195,7 +195,8 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
     }).default(true),
     hcMethod: text("hcMethod").default("GET"),
     hcStatus: integer("hcStatus"), // http code
-    hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
+    hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
+    hcTlsServerName: text("hcTlsServerName"),
 });

 export const exitNodes = sqliteTable("exitNodes", {
@@ -221,6 +221,7 @@ export async function updateProxyResources(
     domainId: domain ? domain.domainId : null,
     enabled: resourceEnabled,
     sso: resourceData.auth?.["sso-enabled"] || false,
+    skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
     ssl: resourceSsl,
     setHostHeader: resourceData["host-header"] || null,
     tlsServerName: resourceData["tls-server-name"] || null,
@@ -610,6 +611,7 @@ export async function updateProxyResources(
     domainId: domain ? domain.domainId : null,
     enabled: resourceEnabled,
     sso: resourceData.auth?.["sso-enabled"] || false,
+    skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
     setHostHeader: resourceData["host-header"] || null,
     tlsServerName: resourceData["tls-server-name"] || null,
     ssl: resourceSsl,
@@ -59,6 +59,7 @@ export const AuthSchema = z.object({
     }),
     "sso-users": z.array(z.email()).optional().default([]),
     "whitelist-users": z.array(z.email()).optional().default([]),
+    "auto-login-idp": z.int().positive().optional(),
 });

 export const RuleSchema = z.object({
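With this schema change a proxy resource's auth block may name an IdP to log in through automatically, and updateProxyResources above maps it onto skipToIdpId. A minimal round-trip sketch (the trimmed schema is hypothetical; the real AuthSchema has more fields than this hunk shows):

import { z } from "zod";

// Trimmed stand-in for AuthSchema, for illustration only.
const AuthSketch = z.object({
    "sso-users": z.array(z.email()).optional().default([]),
    "whitelist-users": z.array(z.email()).optional().default([]),
    "auto-login-idp": z.int().positive().optional()
});

const auth = AuthSketch.parse({ "auto-login-idp": 1 });
// updateProxyResources then persists: skipToIdpId = auth["auto-login-idp"] || null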
@@ -2,7 +2,7 @@ import path from "path";
 import { fileURLToPath } from "url";

 // This is a placeholder value replaced by the build process
-export const APP_VERSION = "1.12.1";
+export const APP_VERSION = "1.12.3";

 export const __FILENAME = fileURLToPath(import.meta.url);
 export const __DIRNAME = path.dirname(__FILENAME);
server/lib/lock.ts (new file, 111 lines):
@@ -0,0 +1,111 @@
export class LockManager {
    /**
     * Acquire a distributed lock using Redis SET with NX and PX options
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @returns Promise<boolean> - true if lock acquired, false otherwise
     */
    async acquireLock(
        lockKey: string,
        ttlMs: number = 30000
    ): Promise<boolean> {
        return true;
    }

    /**
     * Release a lock using Lua script to ensure atomicity
     * @param lockKey - Unique identifier for the lock
     */
    async releaseLock(lockKey: string): Promise<void> {}

    /**
     * Force release a lock regardless of owner (use with caution)
     * @param lockKey - Unique identifier for the lock
     */
    async forceReleaseLock(lockKey: string): Promise<void> {}

    /**
     * Check if a lock exists and get its info
     * @param lockKey - Unique identifier for the lock
     * @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
     */
    async getLockInfo(lockKey: string): Promise<{
        exists: boolean;
        ownedByMe: boolean;
        ttl: number;
        owner?: string;
    }> {
        return { exists: true, ownedByMe: true, ttl: 0 };
    }

    /**
     * Extend the TTL of an existing lock owned by this worker
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - New TTL in milliseconds
     * @returns Promise<boolean> - true if extended successfully
     */
    async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
        return true;
    }

    /**
     * Attempt to acquire lock with retries and exponential backoff
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @param maxRetries - Maximum number of retry attempts
     * @param baseDelayMs - Base delay between retries in milliseconds
     * @returns Promise<boolean> - true if lock acquired
     */
    async acquireLockWithRetry(
        lockKey: string,
        ttlMs: number = 30000,
        maxRetries: number = 5,
        baseDelayMs: number = 100
    ): Promise<boolean> {
        return true;
    }

    /**
     * Execute a function while holding a lock
     * @param lockKey - Unique identifier for the lock
     * @param fn - Function to execute while holding the lock
     * @param ttlMs - Lock TTL in milliseconds
     * @returns Promise<T> - Result of the executed function
     */
    async withLock<T>(
        lockKey: string,
        fn: () => Promise<T>,
        ttlMs: number = 30000
    ): Promise<T> {
        const acquired = await this.acquireLock(lockKey, ttlMs);

        if (!acquired) {
            throw new Error(`Failed to acquire lock: ${lockKey}`);
        }

        try {
            return await fn();
        } finally {
            await this.releaseLock(lockKey);
        }
    }

    /**
     * Clean up expired locks - Redis handles this automatically, but this method
     * can be used to get statistics about locks
     * @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
     */
    async getLockStatistics(): Promise<{
        activeLocksCount: number;
        locksOwnedByMe: number;
    }> {
        return { activeLocksCount: 0, locksOwnedByMe: 0 };
    }

    /**
     * Close the Redis connection
     */
    async disconnect(): Promise<void> {}
}

export const lockManager = new LockManager();
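This file is the open-source counterpart of the Redis-backed implementation added below under server/private: the LockManager surface is identical, but acquireLock always reports success, so withLock degrades to running the callback directly when no Redis-capable build is present. A minimal usage sketch against the interface above (import path and key are illustrative):

import { lockManager } from "@server/lib/lock"; // assumed alias, matching the repo's other @server imports

// With this stub the lock always "succeeds" and fn simply runs; the
// private Redis build gives the identical call real mutual exclusion.
const subnet = await lockManager.withLock(
    "subnet-allocation:7", // example key, mirroring the caller added later in this commit
    async () => {
        // ...critical section: read shared state, pick a free value...
        return "100.89.128.0/28"; // placeholder result
    },
    5000 // hold the lock for at most 5 seconds
);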
server/private/lib/lock.ts (new file, 363 lines):
@@ -0,0 +1,363 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { config } from "@server/lib/config";
import logger from "@server/logger";
import { redis } from "#private/lib/redis";

export class LockManager {
    /**
     * Acquire a distributed lock using Redis SET with NX and PX options
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @returns Promise<boolean> - true if lock acquired, false otherwise
     */
    async acquireLock(
        lockKey: string,
        ttlMs: number = 30000
    ): Promise<boolean> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return true;
        }

        const lockValue = `${
            config.getRawConfig().gerbil.exit_node_name
        }:${Date.now()}`;
        const redisKey = `lock:${lockKey}`;

        try {
            // Use SET with NX (only set if not exists) and PX (expire in milliseconds)
            // This is atomic and handles both setting and expiration
            const result = await redis.set(
                redisKey,
                lockValue,
                "PX",
                ttlMs,
                "NX"
            );

            if (result === "OK") {
                logger.debug(
                    `Lock acquired: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
                return true;
            }

            // Check if the existing lock is from this worker (reentrant behavior)
            const existingValue = await redis.get(redisKey);
            if (
                existingValue &&
                existingValue.startsWith(
                    `${config.getRawConfig().gerbil.exit_node_name}:`
                )
            ) {
                // Extend the lock TTL since it's the same worker
                await redis.pexpire(redisKey, ttlMs);
                logger.debug(
                    `Lock extended: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
                return true;
            }

            return false;
        } catch (error) {
            logger.error(`Failed to acquire lock ${lockKey}:`, error);
            return false;
        }
    }

    /**
     * Release a lock using Lua script to ensure atomicity
     * @param lockKey - Unique identifier for the lock
     */
    async releaseLock(lockKey: string): Promise<void> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return;
        }

        const redisKey = `lock:${lockKey}`;

        // Lua script to ensure we only delete the lock if it belongs to this worker
        const luaScript = `
            local key = KEYS[1]
            local worker_prefix = ARGV[1]
            local current_value = redis.call('GET', key)

            if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
                return redis.call('DEL', key)
            else
                return 0
            end
        `;

        try {
            const result = (await redis.eval(
                luaScript,
                1,
                redisKey,
                `${config.getRawConfig().gerbil.exit_node_name}:`
            )) as number;

            if (result === 1) {
                logger.debug(
                    `Lock released: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
            } else {
                logger.warn(
                    `Lock not released - not owned by worker: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
            }
        } catch (error) {
            logger.error(`Failed to release lock ${lockKey}:`, error);
        }
    }

    /**
     * Force release a lock regardless of owner (use with caution)
     * @param lockKey - Unique identifier for the lock
     */
    async forceReleaseLock(lockKey: string): Promise<void> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return;
        }

        const redisKey = `lock:${lockKey}`;

        try {
            const result = await redis.del(redisKey);
            if (result === 1) {
                logger.debug(`Lock force released: ${lockKey}`);
            }
        } catch (error) {
            logger.error(`Failed to force release lock ${lockKey}:`, error);
        }
    }

    /**
     * Check if a lock exists and get its info
     * @param lockKey - Unique identifier for the lock
     * @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
     */
    async getLockInfo(lockKey: string): Promise<{
        exists: boolean;
        ownedByMe: boolean;
        ttl: number;
        owner?: string;
    }> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return { exists: false, ownedByMe: true, ttl: 0 };
        }

        const redisKey = `lock:${lockKey}`;

        try {
            const [value, ttl] = await Promise.all([
                redis.get(redisKey),
                redis.pttl(redisKey)
            ]);

            const exists = value !== null;
            const ownedByMe =
                exists &&
                value!.startsWith(`${config.getRawConfig().gerbil.exit_node_name}:`);
            const owner = exists ? value!.split(":")[0] : undefined;

            return {
                exists,
                ownedByMe,
                ttl: ttl > 0 ? ttl : 0,
                owner
            };
        } catch (error) {
            logger.error(`Failed to get lock info ${lockKey}:`, error);
            return { exists: false, ownedByMe: false, ttl: 0 };
        }
    }

    /**
     * Extend the TTL of an existing lock owned by this worker
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - New TTL in milliseconds
     * @returns Promise<boolean> - true if extended successfully
     */
    async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return true;
        }

        const redisKey = `lock:${lockKey}`;

        // Lua script to extend TTL only if lock is owned by this worker
        const luaScript = `
            local key = KEYS[1]
            local worker_prefix = ARGV[1]
            local ttl = tonumber(ARGV[2])
            local current_value = redis.call('GET', key)

            if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
                return redis.call('PEXPIRE', key, ttl)
            else
                return 0
            end
        `;

        try {
            const result = (await redis.eval(
                luaScript,
                1,
                redisKey,
                `${config.getRawConfig().gerbil.exit_node_name}:`,
                ttlMs.toString()
            )) as number;

            if (result === 1) {
                logger.debug(
                    `Lock extended: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    } for ${ttlMs}ms`
                );
                return true;
            }
            return false;
        } catch (error) {
            logger.error(`Failed to extend lock ${lockKey}:`, error);
            return false;
        }
    }

    /**
     * Attempt to acquire lock with retries and exponential backoff
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @param maxRetries - Maximum number of retry attempts
     * @param baseDelayMs - Base delay between retries in milliseconds
     * @returns Promise<boolean> - true if lock acquired
     */
    async acquireLockWithRetry(
        lockKey: string,
        ttlMs: number = 30000,
        maxRetries: number = 5,
        baseDelayMs: number = 100
    ): Promise<boolean> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return true;
        }

        for (let attempt = 0; attempt <= maxRetries; attempt++) {
            const acquired = await this.acquireLock(lockKey, ttlMs);

            if (acquired) {
                return true;
            }

            if (attempt < maxRetries) {
                // Exponential backoff with jitter
                const delay =
                    baseDelayMs * Math.pow(2, attempt) + Math.random() * 100;
                await new Promise((resolve) => setTimeout(resolve, delay));
            }
        }

        logger.warn(
            `Failed to acquire lock ${lockKey} after ${maxRetries + 1} attempts`
        );
        return false;
    }

    /**
     * Execute a function while holding a lock
     * @param lockKey - Unique identifier for the lock
     * @param fn - Function to execute while holding the lock
     * @param ttlMs - Lock TTL in milliseconds
     * @returns Promise<T> - Result of the executed function
     */
    async withLock<T>(
        lockKey: string,
        fn: () => Promise<T>,
        ttlMs: number = 30000
    ): Promise<T> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return await fn();
        }

        const acquired = await this.acquireLock(lockKey, ttlMs);

        if (!acquired) {
            throw new Error(`Failed to acquire lock: ${lockKey}`);
        }

        try {
            return await fn();
        } finally {
            await this.releaseLock(lockKey);
        }
    }

    /**
     * Clean up expired locks - Redis handles this automatically, but this method
     * can be used to get statistics about locks
     * @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
     */
    async getLockStatistics(): Promise<{
        activeLocksCount: number;
        locksOwnedByMe: number;
    }> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return { activeLocksCount: 0, locksOwnedByMe: 0 };
        }

        try {
            const keys = await redis.keys("lock:*");
            let locksOwnedByMe = 0;

            if (keys.length > 0) {
                const values = await redis.mget(...keys);
                locksOwnedByMe = values.filter(
                    (value) =>
                        value &&
                        value.startsWith(
                            `${config.getRawConfig().gerbil.exit_node_name}:`
                        )
                ).length;
            }

            return {
                activeLocksCount: keys.length,
                locksOwnedByMe
            };
        } catch (error) {
            logger.error("Failed to get lock statistics:", error);
            return { activeLocksCount: 0, locksOwnedByMe: 0 };
        }
    }

    /**
     * Close the Redis connection
     */
    async disconnect(): Promise<void> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return;
        }
        await redis.quit();
    }
}

export const lockManager = new LockManager();
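The private implementation follows the standard single-node Redis locking recipe: SET with NX and PX makes acquisition and expiry one atomic step, and release goes through a Lua script so the get-compare-delete is atomic and a worker can never delete a lock another worker now owns. Note the deliberate failure mode threaded through every method: if Redis is absent or not ready, locking silently succeeds, matching the open-source stub above. The resulting semantics, sketched (key name illustrative):

import { lockManager } from "#private/lib/lock";

// Two concurrent attempts on the same key.
const [a, b] = await Promise.all([
    lockManager.acquireLock("demo-key", 10_000),
    lockManager.acquireLock("demo-key", 10_000)
]);
// On a single exit node both return true: the stored value is prefixed with
// exit_node_name, so acquisition is reentrant per node and just extends the TTL.
// Across two nodes with different names, exactly one call returns true.

await lockManager.releaseLock("demo-key"); // no-op if another node owns the lock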
@@ -1743,7 +1743,12 @@ hybridRouter.post(
         tls: logEntry.tls
     }));

-    await db.insert(requestAuditLog).values(logEntries);
+    // batch them into inserts of 100 to avoid exceeding parameter limits
+    const batchSize = 100;
+    for (let i = 0; i < logEntries.length; i += batchSize) {
+        const batch = logEntries.slice(i, i + batchSize);
+        await db.insert(requestAuditLog).values(batch);
+    }

     return response(res, {
         data: null,
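The cap exists because each inserted row binds one parameter per column, and the database enforces a per-statement limit (SQLite's classic limit is 999 variables; Postgres allows 65535), so a large audit flush could previously fail outright. The same pattern as a generic helper, sketched (the helper name is hypothetical; the handler above inlines the loop):

// Hypothetical generic form of the batching pattern used above.
async function insertInBatches<T>(
    rows: T[],
    insertBatch: (batch: T[]) => Promise<unknown>,
    batchSize = 100
): Promise<void> {
    for (let i = 0; i < rows.length; i += batchSize) {
        await insertBatch(rows.slice(i, i + batchSize));
    }
}

// e.g. await insertInBatches(logEntries, (b) => db.insert(requestAuditLog).values(b));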
@@ -1,8 +1,8 @@
-import { db, exitNodeOrgs, newts } from "@server/db";
+import { db, ExitNode, exitNodeOrgs, newts, Transaction } from "@server/db";
 import { MessageHandler } from "@server/routers/ws";
 import { exitNodes, Newt, resources, sites, Target, targets } from "@server/db";
 import { targetHealthCheck } from "@server/db";
-import { eq, and, sql, inArray } from "drizzle-orm";
+import { eq, and, sql, inArray, ne } from "drizzle-orm";
 import { addPeer, deletePeer } from "../gerbil/peers";
 import logger from "@server/logger";
 import config from "@server/lib/config";
@@ -17,6 +17,7 @@ import {
     verifyExitNodeOrgAccess
 } from "#dynamic/lib/exitNodes";
 import { fetchContainers } from "./dockerSocket";
+import { lockManager } from "#dynamic/lib/lock";

 export type ExitNodePingResult = {
     exitNodeId: number;
@@ -151,27 +152,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
         return;
     }

-    const sitesQuery = await db
-        .select({
-            subnet: sites.subnet
-        })
-        .from(sites)
-        .where(eq(sites.exitNodeId, exitNodeId));
-
-    const blockSize = config.getRawConfig().gerbil.site_block_size;
-    const subnets = sitesQuery
-        .map((site) => site.subnet)
-        .filter(
-            (subnet) =>
-                subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
-        )
-        .filter((subnet) => subnet !== null);
-    subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
-    const newSubnet = findNextAvailableCidr(
-        subnets,
-        blockSize,
-        exitNode.address
-    );
+    const newSubnet = await getUniqueSubnetForSite(exitNode);
+
     if (!newSubnet) {
         logger.error(
             `No available subnets found for the new exit node id ${exitNodeId} and site id ${siteId}`
@@ -272,7 +254,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
     hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
     hcTimeout: targetHealthCheck.hcTimeout,
     hcHeaders: targetHealthCheck.hcHeaders,
-    hcMethod: targetHealthCheck.hcMethod
+    hcMethod: targetHealthCheck.hcMethod,
+    hcTlsServerName: targetHealthCheck.hcTlsServerName,
 })
 .from(targets)
 .innerJoin(resources, eq(targets.resourceId, resources.resourceId))
@@ -344,7 +327,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
     hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds
     hcTimeout: target.hcTimeout, // in seconds
     hcHeaders: hcHeadersSend,
-    hcMethod: target.hcMethod
+    hcMethod: target.hcMethod,
+    hcTlsServerName: target.hcTlsServerName,
 };
 });

@@ -376,3 +360,39 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
         excludeSender: false // Include sender in broadcast
     };
 };
+
+async function getUniqueSubnetForSite(
+    exitNode: ExitNode,
+    trx: Transaction | typeof db = db
+): Promise<string | null> {
+    const lockKey = `subnet-allocation:${exitNode.exitNodeId}`;
+
+    return await lockManager.withLock(
+        lockKey,
+        async () => {
+            const sitesQuery = await trx
+                .select({
+                    subnet: sites.subnet
+                })
+                .from(sites)
+                .where(eq(sites.exitNodeId, exitNode.exitNodeId));
+
+            const blockSize = config.getRawConfig().gerbil.site_block_size;
+            const subnets = sitesQuery
+                .map((site) => site.subnet)
+                .filter(
+                    (subnet) =>
+                        subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
+                )
+                .filter((subnet) => subnet !== null);
+            subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
+            const newSubnet = findNextAvailableCidr(
+                subnets,
+                blockSize,
+                exitNode.address
+            );
+            return newSubnet;
+        },
+        5000 // 5 second lock TTL - subnet allocation should be quick
+    );
+}
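Extracting getUniqueSubnetForSite is not just cleanup. Two Newts registering at the same moment could each read the sites table before either wrote a row and be handed the same free CIDR; wrapping the read-and-pick step in withLock serializes allocation per exit node across Pangolin servers (ownership is keyed by exit_node_name, so the lock guards between nodes, and the new duplicate-subnet CONFLICT check added to createSite below catches anything that still slips through). Calling it defensively might look like this (the error handling is hypothetical):

try {
    const subnet = await getUniqueSubnetForSite(exitNode);
    // Reserve the subnet by inserting the site row promptly,
    // before the 5-second lock TTL lapses.
} catch (err) {
    // Another node held "subnet-allocation:<exitNodeId>"; withLock makes a
    // single acquire attempt and throws rather than waiting, so retry here
    // (acquireLockWithRetry exists for backoff if needed).
}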
@@ -66,7 +66,8 @@ export async function addTargets(
     hcUnhealthyInterval: hc.hcUnhealthyInterval, // in seconds
     hcTimeout: hc.hcTimeout, // in seconds
     hcHeaders: hcHeadersSend,
-    hcMethod: hc.hcMethod
+    hcMethod: hc.hcMethod,
+    hcTlsServerName: hc.hcTlsServerName,
 };
 });

@@ -198,6 +198,62 @@ export async function createSite(
         }
     }

+    if (subnet && exitNodeId) {
+        // make sure the subnet is in the range of the exit node if provided
+        const [exitNode] = await db
+            .select()
+            .from(exitNodes)
+            .where(eq(exitNodes.exitNodeId, exitNodeId));
+
+        if (!exitNode) {
+            return next(
+                createHttpError(HttpCode.NOT_FOUND, "Exit node not found")
+            );
+        }
+
+        if (!exitNode.address) {
+            return next(
+                createHttpError(
+                    HttpCode.BAD_REQUEST,
+                    "Exit node has no subnet defined"
+                )
+            );
+        }
+
+        const subnetIp = subnet.split("/")[0];
+
+        if (!isIpInCidr(subnetIp, exitNode.address)) {
+            return next(
+                createHttpError(
+                    HttpCode.BAD_REQUEST,
+                    "Subnet is not in the CIDR range of the exit node address."
+                )
+            );
+        }
+
+        // let's also make sure there is no overlap with other sites on the exit node
+        const sitesQuery = await db
+            .select({
+                subnet: sites.subnet
+            })
+            .from(sites)
+            .where(
+                and(
+                    eq(sites.exitNodeId, exitNodeId),
+                    eq(sites.subnet, subnet)
+                )
+            );
+
+        if (sitesQuery.length > 0) {
+            return next(
+                createHttpError(
+                    HttpCode.CONFLICT,
+                    `Subnet ${subnet} overlaps with an existing site on this exit node. Please restart site creation.`
+                )
+            );
+        }
+    }
+
     const niceId = await getUniqueSiteName(orgId);

     let newSite: Site;
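isIpInCidr is an existing helper elsewhere in the codebase and its implementation is not part of this diff, but for IPv4 the containment check it performs reduces to comparing the masked network bits, roughly (sketch under that assumption):

// Hypothetical IPv4-only sketch of the containment test used above.
function isIpInCidrSketch(ip: string, cidr: string): boolean {
    const [base, prefixStr] = cidr.split("/");
    const prefix = parseInt(prefixStr, 10);
    const toInt = (addr: string) =>
        addr
            .split(".")
            .reduce((acc, octet) => ((acc << 8) | parseInt(octet, 10)) >>> 0, 0);
    const mask = prefix === 0 ? 0 : (~0 << (32 - prefix)) >>> 0;
    return (toInt(ip) & mask) === (toInt(base) & mask);
}

// isIpInCidrSketch("100.89.130.1", "100.89.128.0/20") === true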
@@ -48,6 +48,7 @@ const createTargetSchema = z.strictObject({
     hcFollowRedirects: z.boolean().optional().nullable(),
     hcMethod: z.string().min(1).optional().nullable(),
     hcStatus: z.int().optional().nullable(),
+    hcTlsServerName: z.string().optional().nullable(),
     path: z.string().optional().nullable(),
     pathMatchType: z
         .enum(["exact", "prefix", "regex"])
@@ -247,7 +248,8 @@ export async function createTarget(
     hcFollowRedirects: targetData.hcFollowRedirects ?? null,
     hcMethod: targetData.hcMethod ?? null,
     hcStatus: targetData.hcStatus ?? null,
-    hcHealth: "unknown"
+    hcHealth: "unknown",
+    hcTlsServerName: targetData.hcTlsServerName ?? null
 })
 .returning();
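End to end, the new hcTlsServerName field flows from this request body through the targetHealthCheck row and down to the Newt agent, which presents it as the SNI value during HTTPS health checks. A request body exercising it might look like this (sketch; only the hc* names appear in this diff, the surrounding fields are illustrative):

// Hypothetical create-target payload; validated by createTargetSchema.
const body = {
    hcPath: "/healthz",       // illustrative field
    hcMethod: "GET",
    hcStatus: 200,
    hcTlsServerName: "internal.example.com" // presented as SNI to the backend
};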
@@ -57,6 +57,7 @@ function queryTargets(resourceId: number) {
     hcMethod: targetHealthCheck.hcMethod,
     hcStatus: targetHealthCheck.hcStatus,
     hcHealth: targetHealthCheck.hcHealth,
+    hcTlsServerName: targetHealthCheck.hcTlsServerName,
     path: targets.path,
     pathMatchType: targets.pathMatchType,
     rewritePath: targets.rewritePath,
@@ -42,6 +42,7 @@ const updateTargetBodySchema = z.strictObject({
     hcFollowRedirects: z.boolean().optional().nullable(),
     hcMethod: z.string().min(1).optional().nullable(),
     hcStatus: z.int().optional().nullable(),
+    hcTlsServerName: z.string().optional().nullable(),
     path: z.string().optional().nullable(),
     pathMatchType: z.enum(["exact", "prefix", "regex"]).optional().nullable(),
     rewritePath: z.string().optional().nullable(),
@@ -217,7 +218,8 @@ export async function updateTarget(
     hcHeaders: hcHeaders,
     hcFollowRedirects: parsedBody.data.hcFollowRedirects,
     hcMethod: parsedBody.data.hcMethod,
-    hcStatus: parsedBody.data.hcStatus
+    hcStatus: parsedBody.data.hcStatus,
+    hcTlsServerName: parsedBody.data.hcTlsServerName,
 })
 .where(eq(targetHealthCheck.targetId, targetId))
 .returning();
@@ -464,6 +464,7 @@ export default function ReverseProxyTargets(props: {
     hcStatus: null,
     hcMode: null,
     hcUnhealthyInterval: null,
+    hcTlsServerName: null,
     siteType: sites.length > 0 ? sites[0].type : null,
     new: true,
     updated: false
@@ -629,7 +630,8 @@ export default function ReverseProxyTargets(props: {
     hcHealth: "unknown",
     hcStatus: null,
     hcMode: null,
-    hcUnhealthyInterval: null
+    hcUnhealthyInterval: null,
+    hcTlsServerName: null,
 };

 setTargets([...targets, newTarget]);
@@ -730,7 +732,8 @@ export default function ReverseProxyTargets(props: {
     hcMethod: target.hcMethod || null,
     hcStatus: target.hcStatus || null,
     hcUnhealthyInterval: target.hcUnhealthyInterval || null,
-    hcMode: target.hcMode || null
+    hcMode: target.hcMode || null,
+    hcTlsServerName: target.hcTlsServerName,
 };

 // Only include path-related fields for HTTP resources
@@ -1831,7 +1834,9 @@ export default function ReverseProxyTargets(props: {
     hcMode: selectedTargetForHealthCheck.hcMode || "http",
     hcUnhealthyInterval:
         selectedTargetForHealthCheck.hcUnhealthyInterval ||
-        30
+        30,
+    hcTlsServerName: selectedTargetForHealthCheck.hcTlsServerName ||
+        undefined,
 }}
 onChanges={async (config) => {
     if (selectedTargetForHealthCheck) {
@@ -297,6 +297,7 @@ export default function Page() {
     hcStatus: null,
     hcMode: null,
     hcUnhealthyInterval: null,
+    hcTlsServerName: null,
     siteType: sites.length > 0 ? sites[0].type : null,
     new: true,
     updated: false
@@ -454,7 +455,8 @@ export default function Page() {
     hcHealth: "unknown",
     hcStatus: null,
     hcMode: null,
-    hcUnhealthyInterval: null
+    hcUnhealthyInterval: null,
+    hcTlsServerName: null
 };

 setTargets([...targets, newTarget]);
@@ -576,7 +578,8 @@ export default function Page() {
         target.hcFollowRedirects || null,
     hcStatus: target.hcStatus || null,
     hcUnhealthyInterval: target.hcUnhealthyInterval || null,
-    hcMode: target.hcMode || null
+    hcMode: target.hcMode || null,
+    hcTlsServerName: target.hcTlsServerName
 };

 // Only include path-related fields for HTTP resources
@@ -1809,7 +1812,10 @@ export default function Page() {
         "http",
     hcUnhealthyInterval:
         selectedTargetForHealthCheck.hcUnhealthyInterval ||
-        30
+        30,
+    hcTlsServerName:
+        selectedTargetForHealthCheck.hcTlsServerName ||
+        undefined
 }}
 onChanges={async (config) => {
     if (selectedTargetForHealthCheck) {
@@ -51,6 +51,7 @@ type HealthCheckConfig = {
     hcFollowRedirects: boolean;
     hcMode: string;
     hcUnhealthyInterval: number;
+    hcTlsServerName: string;
 };

 type HealthCheckDialogProps = {
@@ -109,7 +110,8 @@ export default function HealthCheckDialog({
     ),
     hcFollowRedirects: z.boolean(),
     hcMode: z.string(),
-    hcUnhealthyInterval: z.int().positive().min(5)
+    hcUnhealthyInterval: z.int().positive().min(5),
+    hcTlsServerName: z.string()
 });

 const form = useForm<z.infer<typeof healthCheckSchema>>({
@@ -147,7 +149,8 @@ export default function HealthCheckDialog({
         : "",
     hcFollowRedirects: initialConfig?.hcFollowRedirects,
     hcMode: initialConfig?.hcMode,
-    hcUnhealthyInterval: initialConfig?.hcUnhealthyInterval
+    hcUnhealthyInterval: initialConfig?.hcUnhealthyInterval,
+    hcTlsServerName: initialConfig?.hcTlsServerName ?? ""
 });
 }, [open]);
@@ -554,6 +557,37 @@ export default function HealthCheckDialog({
             )}
         />

+        {/* TLS Server Name (SNI) */}
+        <FormField
+            control={form.control}
+            name="hcTlsServerName"
+            render={({ field }) => (
+                <FormItem>
+                    <FormLabel>
+                        {t("tlsServerName")}
+                    </FormLabel>
+                    <FormControl>
+                        <Input
+                            {...field}
+                            onChange={(e) => {
+                                field.onChange(e);
+                                handleFieldChange(
+                                    "hcTlsServerName",
+                                    e.target.value
+                                );
+                            }}
+                        />
+                    </FormControl>
+                    <FormDescription>
+                        {t(
+                            "tlsServerNameDescription"
+                        )}
+                    </FormDescription>
+                    <FormMessage />
+                </FormItem>
+            )}
+        />
+
         {/* Custom Headers */}
         <FormField
             control={form.control}
@@ -56,6 +56,10 @@ export default function LocaleSwitcher() {
     {
         value: "nb-NO",
         label: "Norsk (Bokmål)"
     },
+    {
+        value: "zh-TW",
+        label: "繁體中文"
+    }
 ]}
 />
@@ -1,4 +1,4 @@
 export type Locale = (typeof locales)[number];

-export const locales = ['en-US', 'es-ES', 'fr-FR', 'de-DE', 'nl-NL', 'it-IT', 'pl-PL', 'pt-PT', 'tr-TR', 'zh-CN', 'ko-KR', 'bg-BG', 'cs-CZ', 'ru-RU', 'nb-NO'] as const;
+export const locales = ['en-US', 'es-ES', 'fr-FR', 'de-DE', 'nl-NL', 'it-IT', 'pl-PL', 'pt-PT', 'tr-TR', 'zh-CN', 'ko-KR', 'bg-BG', 'cs-CZ', 'ru-RU', 'nb-NO', 'zh-TW'] as const;
 export const defaultLocale: Locale = 'en-US';
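Because locales is a const tuple and Locale is derived from it, the union type widens automatically; after this hunk 'zh-TW' type-checks anywhere a Locale is expected:

// Compiles after this change; a type error before it.
const fallback: Locale = 'zh-TW';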