Compare commits

...

18 Commits

Author SHA1 Message Date
Laurence
5eacbb7239 fix(proxy): prevent deleting wrong tunnel in defer cleanup
Add pointer check before delete to handle race where UpdateLocalSNIs
removes our tunnel and a new one is created for the same hostname.
2026-03-13 16:43:16 +00:00
Laurence
d21c09c84f refactor(proxy): simplify tunnel tracking with mutex-only approach
Remove atomic counter in favor of simple int protected by mutex.
Eliminates race condition complexity and recheck logic.
2026-03-13 16:36:56 +00:00
Laurence
28c65b950c fix(proxy): avoid shadowing ctx variable in pipe() 2026-03-13 15:51:23 +00:00
Laurence
1643d71905 refactor(proxy): use context cancellation for tunnel tracking
- Replace []net.Conn slice with context + atomic counter in activeTunnel
- Use errgroup.WithContext for pipe() to handle goroutine lifecycle
- Use context.AfterFunc to close connections on cancellation
- Fix race condition by comparing tunnel pointers instead of map lookup
- UpdateLocalSNIs now cancels tunnel context instead of iterating conns

This eliminates O(n) connection removal, prevents goroutine leaks,
and provides cleaner cancellation semantics.
2026-03-13 15:47:52 +00:00
Owen
b9261b8fea Add optional tc 2026-02-27 15:45:17 -08:00
Owen
c3e73d0189 Merge branch 'main' of github.com:fosrl/gerbil 2026-01-26 15:22:20 -08:00
dependabot[bot]
df2fbdf160 Bump golang.org/x/crypto in the prod-minor-updates group
Bumps the prod-minor-updates group with 1 update: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.45.0 to 0.46.0
- [Commits](https://github.com/golang/crypto/compare/v0.45.0...v0.46.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-version: 0.46.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: prod-minor-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-26 15:22:08 -08:00
dependabot[bot]
cb4ac8199d Bump actions/checkout from 6.0.0 to 6.0.1
Bumps [actions/checkout](https://github.com/actions/checkout) from 6.0.0 to 6.0.1.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](1af3b93b68...8e8c483db8)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 6.0.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-26 15:22:08 -08:00
dependabot[bot]
dd4b86b3e5 Bump actions/upload-artifact from 5.0.0 to 6.0.0
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 5.0.0 to 6.0.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](330a01c490...b7c566a772)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: 6.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-26 15:22:08 -08:00
dependabot[bot]
bad290aa4e Bump docker/setup-buildx-action from 3.11.1 to 3.12.0
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.11.1 to 3.12.0.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](e468171a9d...8d2750c68a)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-version: 3.12.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-26 15:22:08 -08:00
dependabot[bot]
8c27d5e3bf Bump actions/setup-go from 6.1.0 to 6.2.0
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 6.1.0 to 6.2.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](4dc6199c7b...7a3fe6cf4c)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: 6.2.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-01-26 15:22:08 -08:00
Owen
7e7a37d49c We don't really support the config file anymore
Ref #30
2026-01-26 15:22:08 -08:00
Owen
d44aa97f32 Try to fix mem leak 2026-01-26 15:22:08 -08:00
Owen
b57ad74589 Quiet normal log message
Fixes #2057
2026-01-26 15:22:08 -08:00
Owen
82256a3f6f Add healthcheck route 2026-01-26 15:22:08 -08:00
Owen
9e140a94db Add regex to runs on 2026-01-26 15:22:08 -08:00
Owen
d0c9ea5a57 Fix docker username issue 2026-01-26 15:22:08 -08:00
Owen
c88810ef24 Restrict inbound traffic 2026-01-26 15:21:28 -08:00
3 changed files with 353 additions and 71 deletions

339
main.go
View File

@@ -33,15 +33,16 @@ import (
)
var (
interfaceName string
listenAddr string
mtuInt int
lastReadings = make(map[string]PeerReading)
mu sync.Mutex
wgMu sync.Mutex // Protects WireGuard operations
notifyURL string
proxyRelay *relay.UDPProxyServer
proxySNI *proxy.SNIProxy
interfaceName string
listenAddr string
mtuInt int
lastReadings = make(map[string]PeerReading)
mu sync.Mutex
wgMu sync.Mutex // Protects WireGuard operations
notifyURL string
proxyRelay *relay.UDPProxyServer
proxySNI *proxy.SNIProxy
doTrafficShaping bool
)
type WgConfig struct {
@@ -151,6 +152,7 @@ func main() {
localOverridesStr = os.Getenv("LOCAL_OVERRIDES")
trustedUpstreamsStr = os.Getenv("TRUSTED_UPSTREAMS")
proxyProtocolStr := os.Getenv("PROXY_PROTOCOL")
doTrafficShapingStr := os.Getenv("DO_TRAFFIC_SHAPING")
if interfaceName == "" {
flag.StringVar(&interfaceName, "interface", "wg0", "Name of the WireGuard interface")
@@ -222,6 +224,13 @@ func main() {
flag.BoolVar(&proxyProtocol, "proxy-protocol", true, "Enable PROXY protocol v1 for preserving client IP")
}
if doTrafficShapingStr != "" {
doTrafficShaping = strings.ToLower(doTrafficShapingStr) == "true"
}
if doTrafficShapingStr == "" {
flag.BoolVar(&doTrafficShaping, "do-traffic-shaping", false, "Whether to set up traffic shaping rules for peers (requires tc command and root privileges)")
}
flag.Parse()
logger.Init()
@@ -555,6 +564,10 @@ func ensureWireguardInterface(wgconfig WgConfig) error {
logger.Warn("Failed to ensure MSS clamping: %v", err)
}
if err := ensureWireguardFirewall(); err != nil {
logger.Warn("Failed to ensure WireGuard firewall rules: %v", err)
}
logger.Info("WireGuard interface %s created and configured", interfaceName)
return nil
@@ -723,6 +736,113 @@ func ensureMSSClamping() error {
return nil
}
// ensureWireguardFirewall installs iptables INPUT rules on the WireGuard
// interface so peers can only receive responses to connections this host
// initiated, plus ICMP echo requests; all other inbound traffic from the
// interface is dropped.
//
// The function is idempotent: any pre-existing copies of the rules are
// deleted before fresh ones are appended, so repeated calls do not stack
// duplicates. Each added rule is verified with `iptables -C`; an aggregate
// error is returned if any rule fails to be added or verified.
func ensureWireguardFirewall() error {
	// Rules to enforce:
	// 1. Allow established/related connections (responses to our outbound traffic)
	// 2. Allow ICMP ping packets
	// 3. Drop all other inbound traffic from peers
	rules := [][]string{
		// Allow established and related connections (responses to outbound traffic)
		{
			"-A", "INPUT",
			"-i", interfaceName,
			"-m", "conntrack",
			"--ctstate", "ESTABLISHED,RELATED",
			"-j", "ACCEPT",
		},
		// Allow ICMP ping requests (ICMP type 8 = echo-request)
		{
			"-A", "INPUT",
			"-i", interfaceName,
			"-p", "icmp",
			"--icmp-type", "8",
			"-j", "ACCEPT",
		},
		// Drop all other inbound traffic from WireGuard interface
		{
			"-A", "INPUT",
			"-i", interfaceName,
			"-j", "DROP",
		},
	}

	// First, try to delete any existing copies of these rules so re-running
	// this function does not accumulate duplicates.
	for _, rule := range rules {
		deleteArgs := make([]string, len(rule))
		copy(deleteArgs, rule)
		// Change -A (append) to -D (delete)
		for i, arg := range deleteArgs {
			if arg == "-A" {
				deleteArgs[i] = "-D"
				break
			}
		}
		logger.Debug("Attempting to delete existing firewall rule: %v", deleteArgs)
		// Try deletion multiple times to handle multiple existing copies.
		// An exec.Cmd can only be run once, so a fresh command must be built
		// on every iteration; reusing a single command would fail with
		// "exec: already started" after the first attempt and leave
		// duplicate rules in place.
		for i := 0; i < 5; i++ {
			deleteCmd := exec.Command("/usr/sbin/iptables", deleteArgs...)
			out, err := deleteCmd.CombinedOutput()
			if err != nil {
				if exitErr, ok := err.(*exec.ExitError); ok {
					logger.Debug("Deletion stopped: %v (output: %s)", exitErr.String(), string(out))
				}
				break // No more rules to delete
			}
			logger.Info("Deleted existing firewall rule (attempt %d)", i+1)
		}
	}

	// Now add the rules, verifying each one after insertion.
	var errors []error
	for i, rule := range rules {
		addCmd := exec.Command("/usr/sbin/iptables", rule...)
		logger.Info("Adding WireGuard firewall rule %d: %v", i+1, rule)
		if out, err := addCmd.CombinedOutput(); err != nil {
			errMsg := fmt.Sprintf("Failed to add firewall rule %d: %v (output: %s)", i+1, err, string(out))
			logger.Error("%s", errMsg)
			errors = append(errors, fmt.Errorf("%s", errMsg))
			continue
		}

		// Verify the rule was added by re-checking it with -C.
		checkArgs := make([]string, len(rule))
		copy(checkArgs, rule)
		// Change -A (append) to -C (check)
		for j, arg := range checkArgs {
			if arg == "-A" {
				checkArgs[j] = "-C"
				break
			}
		}
		checkCmd := exec.Command("/usr/sbin/iptables", checkArgs...)
		if out, err := checkCmd.CombinedOutput(); err != nil {
			errMsg := fmt.Sprintf("Rule verification failed for rule %d: %v (output: %s)", i+1, err, string(out))
			logger.Error("%s", errMsg)
			errors = append(errors, fmt.Errorf("%s", errMsg))
			continue
		}
		logger.Info("Successfully added and verified WireGuard firewall rule %d", i+1)
	}

	if len(errors) > 0 {
		var errMsgs []string
		for _, err := range errors {
			errMsgs = append(errMsgs, err.Error())
		}
		return fmt.Errorf("WireGuard firewall setup encountered errors:\n%s", strings.Join(errMsgs, "\n"))
	}

	logger.Info("WireGuard firewall rules successfully configured for interface %s", interfaceName)
	return nil
}
func handlePeer(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodPost:
@@ -775,17 +895,23 @@ func addPeerInternal(peer Peer) error {
return fmt.Errorf("failed to parse public key: %v", err)
}
logger.Debug("Adding peer %s with AllowedIPs: %v", peer.PublicKey, peer.AllowedIPs)
// parse allowed IPs into array of net.IPNet
var allowedIPs []net.IPNet
var wgIPs []string
for _, ipStr := range peer.AllowedIPs {
logger.Debug("Parsing AllowedIP: %s", ipStr)
_, ipNet, err := net.ParseCIDR(ipStr)
if err != nil {
logger.Warn("Failed to parse allowed IP '%s' for peer %s: %v", ipStr, peer.PublicKey, err)
return fmt.Errorf("failed to parse allowed IP: %v", err)
}
allowedIPs = append(allowedIPs, *ipNet)
// Extract the IP address from the CIDR for relay cleanup
wgIPs = append(wgIPs, ipNet.IP.String())
extractedIP := ipNet.IP.String()
wgIPs = append(wgIPs, extractedIP)
logger.Debug("Extracted IP %s from AllowedIP %s", extractedIP, ipStr)
}
peerConfig := wgtypes.PeerConfig{
@@ -801,6 +927,18 @@ func addPeerInternal(peer Peer) error {
return fmt.Errorf("failed to add peer: %v", err)
}
// Setup bandwidth limiting for each peer IP
if doTrafficShaping {
logger.Debug("doTrafficShaping is true, setting up bandwidth limits for %d IPs", len(wgIPs))
for _, wgIP := range wgIPs {
if err := setupPeerBandwidthLimit(wgIP); err != nil {
logger.Warn("Failed to setup bandwidth limit for peer IP %s: %v", wgIP, err)
}
}
} else {
logger.Debug("doTrafficShaping is false, skipping bandwidth limit setup")
}
// Clear relay connections for the peer's WireGuard IPs
if proxyRelay != nil {
for _, wgIP := range wgIPs {
@@ -845,19 +983,17 @@ func removePeerInternal(publicKey string) error {
return fmt.Errorf("failed to parse public key: %v", err)
}
// Get current peer info before removing to clear relay connections
// Get current peer info before removing to clear relay connections and bandwidth limits
var wgIPs []string
if proxyRelay != nil {
device, err := wgClient.Device(interfaceName)
if err == nil {
for _, peer := range device.Peers {
if peer.PublicKey.String() == publicKey {
// Extract WireGuard IPs from this peer's allowed IPs
for _, allowedIP := range peer.AllowedIPs {
wgIPs = append(wgIPs, allowedIP.IP.String())
}
break
device, err := wgClient.Device(interfaceName)
if err == nil {
for _, peer := range device.Peers {
if peer.PublicKey.String() == publicKey {
// Extract WireGuard IPs from this peer's allowed IPs
for _, allowedIP := range peer.AllowedIPs {
wgIPs = append(wgIPs, allowedIP.IP.String())
}
break
}
}
}
@@ -875,6 +1011,15 @@ func removePeerInternal(publicKey string) error {
return fmt.Errorf("failed to remove peer: %v", err)
}
// Remove bandwidth limits for each peer IP
if doTrafficShaping {
for _, wgIP := range wgIPs {
if err := removePeerBandwidthLimit(wgIP); err != nil {
logger.Warn("Failed to remove bandwidth limit for peer IP %s: %v", wgIP, err)
}
}
}
// Clear relay connections for the peer's WireGuard IPs
if proxyRelay != nil {
for _, wgIP := range wgIPs {
@@ -1204,3 +1349,155 @@ func monitorMemory(limit uint64) {
time.Sleep(5 * time.Second)
}
}
// setupPeerBandwidthLimit sets up TC (Traffic Control) to limit bandwidth
// for a specific peer IP on the WireGuard interface.
// Currently hardcoded to 50 Mbps per peer (see bandwidthLimit below).
//
// It lazily creates an HTB root qdisc on the interface if one is missing,
// adds (or replaces) a per-peer HTB class, and installs u32 filters matching
// the peer IP as source and destination so both directions share the class.
// Filter failures are logged but do not fail peer addition.
func setupPeerBandwidthLimit(peerIP string) error {
	logger.Debug("setupPeerBandwidthLimit called for peer IP: %s", peerIP)

	const bandwidthLimit = "50mbit" // 50 Mbps limit per peer

	// Parse the IP to get just the IP address (strip any CIDR notation if present)
	ip := peerIP
	if strings.Contains(peerIP, "/") {
		parsedIP, _, err := net.ParseCIDR(peerIP)
		if err != nil {
			return fmt.Errorf("failed to parse peer IP: %v", err)
		}
		ip = parsedIP.String()
	}

	// First, ensure we have a root qdisc on the interface (HTB - Hierarchical Token Bucket).
	cmd := exec.Command("tc", "qdisc", "show", "dev", interfaceName)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to check qdisc: %v, output: %s", err, string(output))
	}

	// If no HTB qdisc exists, create one; unclassified traffic falls into class 9999.
	if !strings.Contains(string(output), "htb") {
		cmd = exec.Command("tc", "qdisc", "add", "dev", interfaceName, "root", "handle", "1:", "htb", "default", "9999")
		if output, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("failed to add root qdisc: %v, output: %s", err, string(output))
		}
		logger.Info("Created HTB root qdisc on %s", interfaceName)
	}

	// Generate a class ID from the last octet of the IPv4 address.
	// NOTE(review): peers whose IPs differ only in earlier octets (e.g.
	// 10.0.1.5 and 10.0.2.5) map to the same class and therefore share one
	// bandwidth limit — confirm the address plan guarantees unique last
	// octets, or widen this derivation (and removePeerBandwidthLimit's) together.
	ipParts := strings.Split(ip, ".")
	if len(ipParts) != 4 {
		return fmt.Errorf("invalid IPv4 address: %s", ip)
	}
	lastOctet := ipParts[3]
	classID := fmt.Sprintf("1:%s", lastOctet)
	logger.Debug("Generated class ID %s for peer IP %s", classID, ip)

	// Create a class for this peer with the bandwidth limit.
	cmd = exec.Command("tc", "class", "add", "dev", interfaceName, "parent", "1:", "classid", classID,
		"htb", "rate", bandwidthLimit, "ceil", bandwidthLimit)
	if output, err := cmd.CombinedOutput(); err != nil {
		logger.Debug("tc class add failed for %s: %v, output: %s", ip, err, string(output))
		// If the class already exists, try to replace it instead.
		if strings.Contains(string(output), "File exists") {
			cmd = exec.Command("tc", "class", "replace", "dev", interfaceName, "parent", "1:", "classid", classID,
				"htb", "rate", bandwidthLimit, "ceil", bandwidthLimit)
			if output, err := cmd.CombinedOutput(); err != nil {
				return fmt.Errorf("failed to replace class: %v, output: %s", err, string(output))
			}
			logger.Debug("Successfully replaced existing class %s for peer IP %s", classID, ip)
		} else {
			return fmt.Errorf("failed to add class: %v, output: %s", err, string(output))
		}
	} else {
		logger.Debug("Successfully added new class %s for peer IP %s", classID, ip)
	}

	// Add a filter to match traffic from this peer IP (ingress); best-effort.
	cmd = exec.Command("tc", "filter", "add", "dev", interfaceName, "protocol", "ip", "parent", "1:",
		"prio", "1", "u32", "match", "ip", "src", ip, "flowid", classID)
	if output, err := cmd.CombinedOutput(); err != nil {
		// If the filter fails, log but don't fail the peer addition.
		logger.Warn("Failed to add ingress filter for peer IP %s: %v, output: %s", ip, err, string(output))
	}

	// Add a filter to match traffic to this peer IP (egress); best-effort.
	cmd = exec.Command("tc", "filter", "add", "dev", interfaceName, "protocol", "ip", "parent", "1:",
		"prio", "1", "u32", "match", "ip", "dst", ip, "flowid", classID)
	if output, err := cmd.CombinedOutput(); err != nil {
		// If the filter fails, log but don't fail the peer addition.
		logger.Warn("Failed to add egress filter for peer IP %s: %v, output: %s", ip, err, string(output))
	}

	logger.Info("Setup bandwidth limit of %s for peer IP %s (class %s)", bandwidthLimit, ip, classID)
	return nil
}
// removePeerBandwidthLimit tears down the TC state installed by
// setupPeerBandwidthLimit for the given peer IP: it deletes every u32
// filter whose flowid references the peer's class, then deletes the class
// itself. tc failures are logged rather than returned; the function only
// errors on malformed input.
func removePeerBandwidthLimit(peerIP string) error {
	// Strip any CIDR suffix so we work with a bare IP address.
	ip := peerIP
	if strings.Contains(peerIP, "/") {
		addr, _, err := net.ParseCIDR(peerIP)
		if err != nil {
			return fmt.Errorf("failed to parse peer IP: %v", err)
		}
		ip = addr.String()
	}

	// Rebuild the class ID the same way setup derived it (last IPv4 octet).
	octets := strings.Split(ip, ".")
	if len(octets) != 4 {
		return fmt.Errorf("invalid IPv4 address: %s", ip)
	}
	classID := fmt.Sprintf("1:%s", octets[3])

	// Enumerate the filters attached to the root qdisc and delete every one
	// whose flowid points at our class.
	showCmd := exec.Command("tc", "filter", "show", "dev", interfaceName, "parent", "1:")
	listing, err := showCmd.CombinedOutput()
	if err != nil {
		logger.Warn("Failed to list filters for peer IP %s: %v, output: %s", ip, err, string(listing))
	} else {
		// Matching lines look like:
		// filter parent 1: protocol ip pref 1 u32 chain 0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:4
		for _, line := range strings.Split(string(listing), "\n") {
			if !strings.Contains(line, "flowid "+classID) || !strings.Contains(line, "fh ") {
				continue
			}
			// Pull the handle out of the "fh <handle>" token pair.
			var handle string
			fields := strings.Fields(line)
			for idx := 0; idx+1 < len(fields); idx++ {
				if fields[idx] == "fh" {
					handle = fields[idx+1]
					break
				}
			}
			if handle == "" {
				continue
			}
			// Delete this filter using its handle.
			delCmd := exec.Command("tc", "filter", "del", "dev", interfaceName, "parent", "1:", "handle", handle, "prio", "1", "u32")
			if delOutput, delErr := delCmd.CombinedOutput(); delErr != nil {
				logger.Debug("Failed to delete filter handle %s for peer IP %s: %v, output: %s", handle, ip, delErr, string(delOutput))
			} else {
				logger.Debug("Deleted filter handle %s for peer IP %s", handle, ip)
			}
		}
	}

	// Drop the per-peer class; a missing class is expected and not warned about.
	classCmd := exec.Command("tc", "class", "del", "dev", interfaceName, "classid", classID)
	if out, err := classCmd.CombinedOutput(); err != nil {
		if !strings.Contains(string(out), "No such file or directory") && !strings.Contains(string(out), "Cannot find") {
			logger.Warn("Failed to remove class for peer IP %s: %v, output: %s", ip, err, string(out))
		}
	}

	logger.Info("Removed bandwidth limit for peer IP %s (class %s)", ip, classID)
	return nil
}

View File

@@ -18,6 +18,7 @@ import (
"github.com/fosrl/gerbil/logger"
"github.com/patrickmn/go-cache"
"golang.org/x/sync/errgroup"
)
// RouteRecord represents a routing configuration
@@ -72,7 +73,9 @@ type SNIProxy struct {
}
type activeTunnel struct {
conns []net.Conn
ctx context.Context
cancel context.CancelFunc
count int // protected by activeTunnelsLock
}
// readOnlyConn is a wrapper for io.Reader that implements net.Conn
@@ -588,37 +591,32 @@ func (p *SNIProxy) handleConnection(clientConn net.Conn) {
}
}
// Track this tunnel by SNI
// Track this tunnel by SNI using context for cancellation
p.activeTunnelsLock.Lock()
tunnel, ok := p.activeTunnels[hostname]
if !ok {
tunnel = &activeTunnel{}
ctx, cancel := context.WithCancel(p.ctx)
tunnel = &activeTunnel{ctx: ctx, cancel: cancel}
p.activeTunnels[hostname] = tunnel
}
tunnel.conns = append(tunnel.conns, actualClientConn)
tunnel.count++
tunnelCtx := tunnel.ctx
p.activeTunnelsLock.Unlock()
defer func() {
// Remove this conn from active tunnels
p.activeTunnelsLock.Lock()
if tunnel, ok := p.activeTunnels[hostname]; ok {
newConns := make([]net.Conn, 0, len(tunnel.conns))
for _, c := range tunnel.conns {
if c != actualClientConn {
newConns = append(newConns, c)
}
}
if len(newConns) == 0 {
tunnel.count--
if tunnel.count == 0 {
tunnel.cancel()
if p.activeTunnels[hostname] == tunnel {
delete(p.activeTunnels, hostname)
} else {
tunnel.conns = newConns
}
}
p.activeTunnelsLock.Unlock()
}()
// Start bidirectional data transfer
p.pipe(actualClientConn, targetConn, clientReader)
// Start bidirectional data transfer with tunnel context
p.pipe(tunnelCtx, actualClientConn, targetConn, clientReader)
}
// getRoute retrieves routing information for a hostname
@@ -754,47 +752,36 @@ func (p *SNIProxy) selectStickyEndpoint(clientAddr string, endpoints []string) s
}
// pipe handles bidirectional data transfer between connections
func (p *SNIProxy) pipe(clientConn, targetConn net.Conn, clientReader io.Reader) {
var wg sync.WaitGroup
wg.Add(2)
func (p *SNIProxy) pipe(ctx context.Context, clientConn, targetConn net.Conn, clientReader io.Reader) {
g, gCtx := errgroup.WithContext(ctx)
// closeOnce ensures we only close connections once
var closeOnce sync.Once
closeConns := func() {
closeOnce.Do(func() {
// Close both connections to unblock any pending reads
clientConn.Close()
targetConn.Close()
})
}
// Close connections when context cancels to unblock io.Copy operations
context.AfterFunc(gCtx, func() {
clientConn.Close()
targetConn.Close()
})
// Copy data from client to target (using the buffered reader)
go func() {
defer wg.Done()
defer closeConns()
// Use a large buffer for better performance
// Copy data from client to target
g.Go(func() error {
buf := make([]byte, 32*1024)
_, err := io.CopyBuffer(targetConn, clientReader, buf)
if err != nil && err != io.EOF {
logger.Debug("Copy client->target error: %v", err)
}
}()
return err
})
// Copy data from target to client
go func() {
defer wg.Done()
defer closeConns()
// Use a large buffer for better performance
g.Go(func() error {
buf := make([]byte, 32*1024)
_, err := io.CopyBuffer(clientConn, targetConn, buf)
if err != nil && err != io.EOF {
logger.Debug("Copy target->client error: %v", err)
}
}()
return err
})
wg.Wait()
g.Wait()
}
// GetCacheStats returns cache statistics
@@ -830,16 +817,14 @@ func (p *SNIProxy) UpdateLocalSNIs(fullDomains []string) {
logger.Debug("Updated local SNIs, added %d, removed %d", len(newSNIs), len(removed))
// Terminate tunnels for removed SNIs
// Terminate tunnels for removed SNIs via context cancellation
if len(removed) > 0 {
p.activeTunnelsLock.Lock()
for _, sni := range removed {
if tunnels, ok := p.activeTunnels[sni]; ok {
for _, conn := range tunnels.conns {
conn.Close()
}
if tunnel, ok := p.activeTunnels[sni]; ok {
tunnel.cancel()
delete(p.activeTunnels, sni)
logger.Debug("Closed tunnels for SNI target change: %s", sni)
logger.Debug("Cancelled tunnel context for SNI target change: %s", sni)
}
}
p.activeTunnelsLock.Unlock()

View File

@@ -839,7 +839,7 @@ func (s *UDPProxyServer) clearSessionsForIP(ip string) {
s.wgSessions.Delete(key)
}
logger.Info("Cleared %d sessions for WG IP: %s", len(keysToDelete), ip)
logger.Debug("Cleared %d sessions for WG IP: %s", len(keysToDelete), ip)
}
// // clearProxyMappingsForWGIP removes all proxy mappings that have destinations pointing to a specific WireGuard IP