Compare commits

..

1 Commit

Author SHA1 Message Date
Laurence
c7d9c72f29 Add HTTP client reuse and buffer pooling for performance
- Add reusable HTTP client with connection pooling for API requests
- Add sync.Pool for 32KB buffers used in connection piping
- Clear buffers before returning to pool to prevent data leakage
- Reduces GC pressure and improves throughput under load
2026-03-13 15:28:04 +00:00

View File

@@ -69,10 +69,16 @@ type SNIProxy struct {
// Trusted upstream proxies that can send PROXY protocol
trustedUpstreams map[string]struct{}
// Reusable HTTP client for API requests
httpClient *http.Client
// Buffer pool for connection piping
bufferPool *sync.Pool
}
type activeTunnel struct {
conns map[net.Conn]struct{}
conns []net.Conn
}
// readOnlyConn is a wrapper for io.Reader that implements net.Conn
@@ -374,6 +380,20 @@ func NewSNIProxy(port int, remoteConfigURL, publicKey, localProxyAddr string, lo
localOverrides: overridesMap,
activeTunnels: make(map[string]*activeTunnel),
trustedUpstreams: trustedMap,
httpClient: &http.Client{
Timeout: 5 * time.Second,
Transport: &http.Transport{
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
IdleConnTimeout: 90 * time.Second,
},
},
bufferPool: &sync.Pool{
New: func() interface{} {
buf := make([]byte, 32*1024)
return &buf
},
},
}
return proxy, nil
@@ -592,19 +612,26 @@ func (p *SNIProxy) handleConnection(clientConn net.Conn) {
p.activeTunnelsLock.Lock()
tunnel, ok := p.activeTunnels[hostname]
if !ok {
tunnel = &activeTunnel{conns: make(map[net.Conn]struct{})}
tunnel = &activeTunnel{}
p.activeTunnels[hostname] = tunnel
}
tunnel.conns[actualClientConn] = struct{}{}
tunnel.conns = append(tunnel.conns, actualClientConn)
p.activeTunnelsLock.Unlock()
defer func() {
// Remove this conn from active tunnels - O(1) with map
// Remove this conn from active tunnels
p.activeTunnelsLock.Lock()
if tunnel, ok := p.activeTunnels[hostname]; ok {
delete(tunnel.conns, actualClientConn)
if len(tunnel.conns) == 0 {
newConns := make([]net.Conn, 0, len(tunnel.conns))
for _, c := range tunnel.conns {
if c != actualClientConn {
newConns = append(newConns, c)
}
}
if len(newConns) == 0 {
delete(p.activeTunnels, hostname)
} else {
tunnel.conns = newConns
}
}
p.activeTunnelsLock.Unlock()
@@ -674,9 +701,8 @@ func (p *SNIProxy) getRoute(hostname, clientAddr string) (*RouteRecord, error) {
}
req.Header.Set("Content-Type", "application/json")
// Make HTTP request
client := &http.Client{Timeout: 5 * time.Second}
resp, err := client.Do(req)
// Make HTTP request using reusable client
resp, err := p.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("API request failed: %w", err)
}
@@ -766,9 +792,15 @@ func (p *SNIProxy) pipe(clientConn, targetConn net.Conn, clientReader io.Reader)
defer wg.Done()
defer closeConns()
// Use a large buffer for better performance
buf := make([]byte, 32*1024)
_, err := io.CopyBuffer(targetConn, clientReader, buf)
// Get buffer from pool and return when done
bufPtr := p.bufferPool.Get().(*[]byte)
defer func() {
// Clear buffer before returning to pool to prevent data leakage
clear(*bufPtr)
p.bufferPool.Put(bufPtr)
}()
_, err := io.CopyBuffer(targetConn, clientReader, *bufPtr)
if err != nil && err != io.EOF {
logger.Debug("Copy client->target error: %v", err)
}
@@ -779,9 +811,15 @@ func (p *SNIProxy) pipe(clientConn, targetConn net.Conn, clientReader io.Reader)
defer wg.Done()
defer closeConns()
// Use a large buffer for better performance
buf := make([]byte, 32*1024)
_, err := io.CopyBuffer(clientConn, targetConn, buf)
// Get buffer from pool and return when done
bufPtr := p.bufferPool.Get().(*[]byte)
defer func() {
// Clear buffer before returning to pool to prevent data leakage
clear(*bufPtr)
p.bufferPool.Put(bufPtr)
}()
_, err := io.CopyBuffer(clientConn, targetConn, *bufPtr)
if err != nil && err != io.EOF {
logger.Debug("Copy target->client error: %v", err)
}
@@ -803,42 +841,32 @@ func (p *SNIProxy) ClearCache() {
// UpdateLocalSNIs updates the local SNIs and invalidates cache for changed domains
func (p *SNIProxy) UpdateLocalSNIs(fullDomains []string) {
newSNIs := make(map[string]struct{}, len(fullDomains))
newSNIs := make(map[string]struct{})
for _, domain := range fullDomains {
newSNIs[domain] = struct{}{}
// Invalidate any cached route for this domain
p.cache.Delete(domain)
}
// Get old SNIs with read lock to compute diff outside write lock
p.localSNIsLock.RLock()
oldSNIs := p.localSNIs
p.localSNIsLock.RUnlock()
// Compute removed SNIs outside the lock
// Update localSNIs
p.localSNIsLock.Lock()
removed := make([]string, 0)
for sni := range oldSNIs {
for sni := range p.localSNIs {
if _, stillLocal := newSNIs[sni]; !stillLocal {
removed = append(removed, sni)
}
}
// Swap with minimal write lock hold time
p.localSNIsLock.Lock()
p.localSNIs = newSNIs
p.localSNIsLock.Unlock()
// Invalidate cache for new domains (cache is thread-safe)
for domain := range newSNIs {
p.cache.Delete(domain)
}
logger.Debug("Updated local SNIs, added %d, removed %d", len(newSNIs), len(removed))
// Terminate tunnels for removed SNIs
if len(removed) > 0 {
p.activeTunnelsLock.Lock()
for _, sni := range removed {
if tunnel, ok := p.activeTunnels[sni]; ok {
for conn := range tunnel.conns {
if tunnels, ok := p.activeTunnels[sni]; ok {
for _, conn := range tunnels.conns {
conn.Close()
}
delete(p.activeTunnels, sni)