Mirror of https://github.com/fosrl/newt.git (synced 2026-03-13 02:14:56 -05:00)

Compare commits (11 commits)
| SHA1 |
|---|
| 5e60da37d1 |
| 53d79aea5a |
| 0f6852b681 |
| 2b8e280f2e |
| 3a377d43de |
| 792057cf6c |
| 57afe91e85 |
| 3389088c43 |
| e73150c187 |
| 18556f34b2 |
| 66c235624a |
.github/workflows/cicd.yml (vendored, 438 lines changed)
@@ -20,16 +20,6 @@ on:
        description: "SemVer version to release (e.g., 1.2.3, no leading 'v')"
        required: true
        type: string
      publish_latest:
        description: "Also publish the 'latest' image tag"
        required: true
        type: boolean
        default: false
      publish_minor:
        description: "Also publish the 'major.minor' image tag (e.g., 1.2)"
        required: true
        type: boolean
        default: false
      target_branch:
        description: "Branch to tag"
        required: false
@@ -86,9 +76,6 @@ jobs:
    name: Build and Release
    runs-on: ubuntu-24.04
    timeout-minutes: 120
    env:
      DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
      GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

    steps:
      - name: Checkout code
@@ -96,37 +83,6 @@ jobs:
        with:
          fetch-depth: 0

      - name: Capture created timestamp
        run: echo "IMAGE_CREATED=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_ENV
        shell: bash

      - name: Set up QEMU
        uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0

      - name: Log in to Docker Hub
        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Log in to GHCR
        uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Normalize image names to lowercase
        run: |
          set -euo pipefail
          echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
          echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
        shell: bash

      - name: Extract tag name
        env:
          EVENT_NAME: ${{ github.event_name }}
@@ -196,45 +152,13 @@ jobs:
        shell: bash

      - name: Install Go
        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
        with:
          go-version-file: go.mod

      - name: Resolve publish-latest flag
        env:
          EVENT_NAME: ${{ github.event_name }}
          PL_INPUT: ${{ inputs.publish_latest }}
          PL_VAR: ${{ vars.PUBLISH_LATEST }}
        run: |
          set -euo pipefail
          val="false"
          if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
            if [ "${PL_INPUT}" = "true" ]; then val="true"; fi
          else
            if [ "${PL_VAR}" = "true" ]; then val="true"; fi
          fi
          echo "PUBLISH_LATEST=$val" >> $GITHUB_ENV
        shell: bash

      - name: Resolve publish-minor flag
        env:
          EVENT_NAME: ${{ github.event_name }}
          PM_INPUT: ${{ inputs.publish_minor }}
          PM_VAR: ${{ vars.PUBLISH_MINOR }}
        run: |
          set -euo pipefail
          val="false"
          if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
            if [ "${PM_INPUT}" = "true" ]; then val="true"; fi
          else
            if [ "${PM_VAR}" = "true" ]; then val="true"; fi
          fi
          echo "PUBLISH_MINOR=$val" >> $GITHUB_ENV
        shell: bash

      - name: Cache Go modules
        if: ${{ hashFiles('**/go.sum') != '' }}
        uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
        uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
        with:
          path: |
            ~/.cache/go-build
@@ -250,326 +174,6 @@ jobs:
|
||||
go test ./... -race -covermode=atomic
|
||||
shell: bash
|
||||
|
||||
- name: Resolve license fallback
|
||||
run: echo "IMAGE_LICENSE=${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Resolve registries list (GHCR always, Docker Hub only if creds)
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
images="${GHCR_IMAGE}"
|
||||
if [ -n "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}" ] && [ -n "${{ secrets.DOCKER_HUB_USERNAME }}" ]; then
|
||||
images="${images}\n${DOCKERHUB_IMAGE}"
|
||||
fi
|
||||
{
|
||||
echo 'IMAGE_LIST<<EOF'
|
||||
echo -e "$images"
|
||||
echo 'EOF'
|
||||
} >> "$GITHUB_ENV"
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
||||
with:
|
||||
images: ${{ env.IMAGE_LIST }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}},value=${{ env.TAG }}
|
||||
type=semver,pattern={{major}}.{{minor}},value=${{ env.TAG }},enable=${{ env.PUBLISH_MINOR == 'true' && env.IS_RC != 'true' }}
|
||||
type=raw,value=latest,enable=${{ env.IS_RC != 'true' }}
|
||||
flavor: |
|
||||
latest=false
|
||||
labels: |
|
||||
org.opencontainers.image.title=${{ github.event.repository.name }}
|
||||
org.opencontainers.image.version=${{ env.TAG }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.source=${{ github.event.repository.html_url }}
|
||||
org.opencontainers.image.url=${{ github.event.repository.html_url }}
|
||||
org.opencontainers.image.documentation=${{ github.event.repository.html_url }}
|
||||
org.opencontainers.image.description=${{ github.event.repository.description }}
|
||||
org.opencontainers.image.licenses=${{ env.IMAGE_LICENSE }}
|
||||
org.opencontainers.image.created=${{ env.IMAGE_CREATED }}
|
||||
org.opencontainers.image.ref.name=${{ env.TAG }}
|
||||
org.opencontainers.image.authors=${{ github.repository_owner }}
|
||||
- name: Echo build config (non-secret)
|
||||
shell: bash
|
||||
env:
|
||||
IMAGE_TITLE: ${{ github.event.repository.name }}
|
||||
IMAGE_VERSION: ${{ env.TAG }}
|
||||
IMAGE_REVISION: ${{ github.sha }}
|
||||
IMAGE_SOURCE_URL: ${{ github.event.repository.html_url }}
|
||||
IMAGE_URL: ${{ github.event.repository.html_url }}
|
||||
IMAGE_DESCRIPTION: ${{ github.event.repository.description }}
|
||||
IMAGE_LICENSE: ${{ env.IMAGE_LICENSE }}
|
||||
DOCKERHUB_IMAGE: ${{ env.DOCKERHUB_IMAGE }}
|
||||
GHCR_IMAGE: ${{ env.GHCR_IMAGE }}
|
||||
DOCKER_HUB_USER: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
REPO: ${{ github.repository }}
|
||||
OWNER: ${{ github.repository_owner }}
|
||||
WORKFLOW_REF: ${{ github.workflow_ref }}
|
||||
REF: ${{ github.ref }}
|
||||
REF_NAME: ${{ github.ref_name }}
|
||||
RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "=== OCI Label Values ==="
|
||||
echo "org.opencontainers.image.title=${IMAGE_TITLE}"
|
||||
echo "org.opencontainers.image.version=${IMAGE_VERSION}"
|
||||
echo "org.opencontainers.image.revision=${IMAGE_REVISION}"
|
||||
echo "org.opencontainers.image.source=${IMAGE_SOURCE_URL}"
|
||||
echo "org.opencontainers.image.url=${IMAGE_URL}"
|
||||
echo "org.opencontainers.image.description=${IMAGE_DESCRIPTION}"
|
||||
echo "org.opencontainers.image.licenses=${IMAGE_LICENSE}"
|
||||
echo
|
||||
echo "=== Images ==="
|
||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE}"
|
||||
echo "GHCR_IMAGE=${GHCR_IMAGE}"
|
||||
echo "DOCKER_HUB_USERNAME=${DOCKER_HUB_USER}"
|
||||
echo
|
||||
echo "=== GitHub Kontext ==="
|
||||
echo "repository=${REPO}"
|
||||
echo "owner=${OWNER}"
|
||||
echo "workflow_ref=${WORKFLOW_REF}"
|
||||
echo "ref=${REF}"
|
||||
echo "ref_name=${REF_NAME}"
|
||||
echo "run_url=${RUN_URL}"
|
||||
echo
|
||||
echo "=== docker/metadata-action outputs (Tags/Labels), raw ==="
|
||||
echo "::group::tags"
|
||||
echo "${{ steps.meta.outputs.tags }}"
|
||||
echo "::endgroup::"
|
||||
echo "::group::labels"
|
||||
echo "${{ steps.meta.outputs.labels }}"
|
||||
echo "::endgroup::"
|
||||
- name: Build and push (Docker Hub + GHCR)
|
||||
id: build
|
||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha,scope=${{ github.repository }}
|
||||
cache-to: type=gha,mode=max,scope=${{ github.repository }}
|
||||
provenance: mode=max
|
||||
sbom: true
|
||||
|
||||
- name: Compute image digest refs
|
||||
run: |
|
||||
echo "DIGEST=${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
|
||||
echo "GHCR_REF=$GHCR_IMAGE@${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
|
||||
echo "DH_REF=$DOCKERHUB_IMAGE@${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
|
||||
echo "Built digest: ${{ steps.build.outputs.digest }}"
|
||||
shell: bash
|
||||
|
||||
- name: Attest build provenance (GHCR)
|
||||
id: attest-ghcr
|
||||
uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0
|
||||
with:
|
||||
subject-name: ${{ env.GHCR_IMAGE }}
|
||||
subject-digest: ${{ steps.build.outputs.digest }}
|
||||
push-to-registry: true
|
||||
show-summary: true
|
||||
|
||||
- name: Attest build provenance (Docker Hub)
|
||||
continue-on-error: true
|
||||
id: attest-dh
|
||||
uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0
|
||||
with:
|
||||
subject-name: index.docker.io/fosrl/${{ github.event.repository.name }}
|
||||
subject-digest: ${{ steps.build.outputs.digest }}
|
||||
push-to-registry: true
|
||||
show-summary: true
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
with:
|
||||
cosign-release: 'v3.0.2'
|
||||
|
||||
- name: Sanity check cosign private key
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cosign public-key --key env://COSIGN_PRIVATE_KEY >/dev/null
|
||||
shell: bash
|
||||
|
||||
- name: Sign GHCR image (digest) with key (recursive)
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Signing ${GHCR_REF} (digest) recursively with provided key"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${GHCR_REF}"
|
||||
echo "Waiting 30 seconds for signatures to propagate..."
|
||||
sleep 30
|
||||
shell: bash
|
||||
|
||||
- name: Generate SBOM (SPDX JSON)
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
|
||||
with:
|
||||
image-ref: ${{ env.GHCR_IMAGE }}@${{ steps.build.outputs.digest }}
|
||||
format: spdx-json
|
||||
output: sbom.spdx.json
|
||||
|
||||
- name: Validate SBOM JSON
|
||||
run: jq -e . sbom.spdx.json >/dev/null
|
||||
shell: bash
|
||||
|
||||
- name: Minify SBOM JSON (optional hardening)
|
||||
run: jq -c . sbom.spdx.json > sbom.min.json && mv sbom.min.json sbom.spdx.json
|
||||
shell: bash
|
||||
|
||||
- name: Create SBOM attestation (GHCR, private key)
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cosign attest \
|
||||
--key env://COSIGN_PRIVATE_KEY \
|
||||
--type spdxjson \
|
||||
--predicate sbom.spdx.json \
|
||||
"${GHCR_REF}"
|
||||
shell: bash
|
||||
|
||||
- name: Create SBOM attestation (Docker Hub, private key)
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cosign attest \
|
||||
--key env://COSIGN_PRIVATE_KEY \
|
||||
--type spdxjson \
|
||||
--predicate sbom.spdx.json \
|
||||
"${DH_REF}"
|
||||
shell: bash
|
||||
|
||||
- name: Keyless sign & verify GHCR digest (OIDC)
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
WORKFLOW_REF: ${{ github.workflow_ref }} # owner/repo/.github/workflows/<file>@refs/tags/<tag>
|
||||
ISSUER: https://token.actions.githubusercontent.com
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Keyless signing ${GHCR_REF}"
|
||||
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${GHCR_REF}"
|
||||
echo "Verify keyless (OIDC) signature policy on ${GHCR_REF}"
|
||||
cosign verify \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${WORKFLOW_REF}" \
|
||||
"${GHCR_REF}" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Sign Docker Hub image (digest) with key (recursive)
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Signing ${DH_REF} (digest) recursively with provided key (Docker media types fallback)"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${DH_REF}"
|
||||
shell: bash
|
||||
|
||||
- name: Keyless sign & verify Docker Hub digest (OIDC)
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
ISSUER: https://token.actions.githubusercontent.com
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Keyless signing ${DH_REF} (force public-good Rekor)"
|
||||
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${DH_REF}"
|
||||
echo "Keyless verify via Rekor (strict identity)"
|
||||
if ! cosign verify \
|
||||
--rekor-url https://rekor.sigstore.dev \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
||||
"${DH_REF}" -o text; then
|
||||
echo "Rekor verify failed — retry offline bundle verify (no Rekor)"
|
||||
if ! cosign verify \
|
||||
--offline \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
||||
"${DH_REF}" -o text; then
|
||||
echo "Offline bundle verify failed — ignore tlog (TEMP for debugging)"
|
||||
cosign verify \
|
||||
--insecure-ignore-tlog=true \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
||||
"${DH_REF}" -o text || true
|
||||
fi
|
||||
fi
|
||||
- name: Verify signature (public key) GHCR digest + tag
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TAG_VAR="${TAG}"
|
||||
echo "Verifying (digest) ${GHCR_REF}"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "$GHCR_REF" -o text
|
||||
echo "Verifying (tag) $GHCR_IMAGE:$TAG_VAR"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "$GHCR_IMAGE:$TAG_VAR" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify SBOM attestation (GHCR)
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
run: cosign verify-attestation --key env://COSIGN_PUBLIC_KEY --type spdxjson "$GHCR_REF" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify SLSA provenance (GHCR)
|
||||
env:
|
||||
ISSUER: https://token.actions.githubusercontent.com
|
||||
WFREF: ${{ github.workflow_ref }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# (optional) show which predicate types are present to aid debugging
|
||||
cosign download attestation "$GHCR_REF" \
|
||||
| jq -r '.payload | @base64d | fromjson | .predicateType' | sort -u || true
|
||||
# Verify the SLSA v1 provenance attestation (predicate URL)
|
||||
cosign verify-attestation \
|
||||
--type 'https://slsa.dev/provenance/v1' \
|
||||
--certificate-oidc-issuer "$ISSUER" \
|
||||
--certificate-identity "https://github.com/${WFREF}" \
|
||||
--rekor-url https://rekor.sigstore.dev \
|
||||
"$GHCR_REF" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify signature (public key) Docker Hub digest
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Verifying (digest) ${DH_REF} with Docker media types"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${DH_REF}" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify signature (public key) Docker Hub tag
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Verifying (tag) $DOCKERHUB_IMAGE:$TAG with Docker media types"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "$DOCKERHUB_IMAGE:$TAG" -o text
|
||||
shell: bash
|
||||
|
||||
# - name: Trivy scan (GHCR image)
|
||||
# id: trivy
|
||||
# uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
|
||||
@@ -589,28 +193,20 @@ jobs:
      #     sarif_file: trivy-ghcr.sarif
      #     category: Image Vulnerability Scan

      - name: Build binaries
        env:
          CGO_ENABLED: "0"
          GOFLAGS: "-trimpath"
        run: |
          set -euo pipefail
          TAG_VAR="${TAG}"
          make -j 10 go-build-release tag=$TAG_VAR
        shell: bash
      #- name: Build binaries
      #  env:
      #    CGO_ENABLED: "0"
      #    GOFLAGS: "-trimpath"
      #  run: |
      #    set -euo pipefail
      #    TAG_VAR="${TAG}"
      #    make -j 10 go-build-release tag=$TAG_VAR
      #  shell: bash

      - name: Create GitHub Release
        uses: softprops/action-gh-release@5be0e66d93ac7ed76da52eca8bb058f665c3a5fe # v2.4.2
      - name: Run GoReleaser (binaries + deb/rpm/apk)
        uses: goreleaser/goreleaser-action@v6
        with:
          tag_name: ${{ env.TAG }}
          generate_release_notes: true
          prerelease: ${{ env.IS_RC == 'true' }}
          files: |
            bin/*
          fail_on_unmatched_files: true
          draft: true
          body: |
            ## Container Images
            - GHCR: `${{ env.GHCR_REF }}`
            - Docker Hub: `${{ env.DH_REF || 'N/A' }}`

            **Digest:** `${{ steps.build.outputs.digest }}`
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/mirror.yaml (vendored, 20 lines changed)
@@ -1,20 +1,28 @@
name: Mirror & Sign (Docker Hub to GHCR)

on:
  workflow_dispatch: {}
  workflow_dispatch:
    inputs:
      source_image:
        description: "Source image (e.g., docker.io/owner/newt)"
        required: true
        type: string
      dest_image:
        description: "Destination image (e.g., ghcr.io/owner/newt)"
        required: true
        type: string

permissions:
  contents: read
  packages: write
  id-token: write # for keyless OIDC

env:
  SOURCE_IMAGE: docker.io/fosrl/newt
  DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}

jobs:
  mirror-and-dual-sign:
    runs-on: amd64-runner
    runs-on: ubuntu-24.04
    env:
      SOURCE_IMAGE: ${{ inputs.source_image }}
      DEST_IMAGE: ${{ inputs.dest_image }}
    steps:
      - name: Install skopeo + jq
        run: |
.github/workflows/publish-apt.yml (vendored, new file, 64 lines)
@@ -0,0 +1,64 @@
name: Publish APT repo to S3/CloudFront

on:
  release:
    types: [published]
  push:
    tags:
      - "[0-9]+.[0-9]+.[0-9]+"
      - "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag to publish (e.g. 1.9.0). Leave empty to use latest release."
        required: false
        type: string
      backfill_all:
        description: "Build/publish repo for ALL releases."
        required: false
        default: false
        type: boolean

permissions:
  id-token: write
  contents: read

jobs:
  publish:
    runs-on: ubuntu-24.04
    env:
      PKG_NAME: newt
      SUITE: stable
      COMPONENT: main
      REPO_BASE_URL: https://repo.dev.fosrl.io/apt

      AWS_REGION: ${{ vars.AWS_REGION }}
      S3_BUCKET: ${{ vars.S3_BUCKET }}
      S3_PREFIX: ${{ vars.S3_PREFIX }}
      CLOUDFRONT_DISTRIBUTION_ID: ${{ vars.CLOUDFRONT_DISTRIBUTION_ID }}

      INPUT_TAG: ${{ inputs.tag }}
      BACKFILL_ALL: ${{ inputs.backfill_all }}
      EVENT_TAG: ${{ github.event.release.tag_name }}
      PUSH_TAG: ${{ github.ref_name }}
      GH_REPO: ${{ github.repository }}

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Configure AWS credentials (OIDC)
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
          aws-region: ${{ vars.AWS_REGION }}

      - name: Install dependencies
        run: sudo apt-get update && sudo apt-get install -y dpkg-dev apt-utils gnupg curl jq gh

      - name: Publish APT repo
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          APT_GPG_PRIVATE_KEY: ${{ secrets.APT_GPG_PRIVATE_KEY }}
          APT_GPG_PASSPHRASE: ${{ secrets.APT_GPG_PASSPHRASE }}
        run: ./scripts/publish-apt.sh
.github/workflows/test.yml (vendored, 4 lines changed)
@@ -16,7 +16,7 @@ jobs:
      matrix:
        target:
          - local
          - docker-build
          #- docker-build
          - go-build-release-darwin-amd64
          - go-build-release-darwin-arm64
          - go-build-release-freebsd-amd64
@@ -31,7 +31,7 @@ jobs:
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Set up Go
        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
        uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
        with:
          go-version: 1.25
.goreleaser.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
version: 2
project_name: newt

release:
  draft: true
  prerelease: "{{ contains .Tag \"-rc.\" }}"
  name_template: "{{ .Tag }}"

builds:
  - id: newt
    main: ./main.go
    binary: newt
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
      - arm64
    flags:
      - -trimpath
    ldflags:
      - -s -w -X main.newtVersion={{ .Tag }}

archives:
  - id: binaries
    builds:
      - newt
    format: binary
    name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}"

checksum:
  name_template: "checksums.txt"

nfpms:
  - id: packages
    package_name: newt
    builds:
      - newt
    vendor: fosrl
    maintainer: fosrl <repo@fosrl.io>
    description: Newt - userspace tunnel client and TCP/UDP proxy
    license: AGPL-3.0-or-later
    formats:
      - deb
      - rpm
      - apk
    bindir: /usr/bin
    file_name_template: "newt_{{ .Version }}_{{ .Arch }}"
    contents:
      - src: LICENSE
        dst: /usr/share/doc/newt/LICENSE
Dockerfile

@@ -1,5 +1,4 @@
# FROM golang:1.25-alpine AS builder
FROM public.ecr.aws/docker/library/golang:1.25-alpine AS builder
FROM golang:1.25-alpine AS builder

# Install git and ca-certificates
RUN apk --no-cache add ca-certificates git tzdata
@@ -19,7 +18,7 @@ COPY . .
# Build the application
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /newt

FROM public.ecr.aws/docker/library/alpine:3.23 AS runner
FROM alpine:3.23 AS runner

RUN apk --no-cache add ca-certificates tzdata iputils
README.md (13 lines changed)
@@ -1,15 +1,24 @@
# Newt

[Go Reference](https://pkg.go.dev/github.com/fosrl/newt)
[License](https://github.com/fosrl/newt/blob/main/LICENSE)
[Go Report Card](https://goreportcard.com/report/github.com/fosrl/newt)

Newt is a fully user space [WireGuard](https://www.wireguard.com/) tunnel client and TCP/UDP proxy, designed to securely expose private resources controlled by Pangolin. By using Newt, you don't need to manage complex WireGuard tunnels and NATing.

### Installation and Documentation
## Installation and Documentation

Newt is used with Pangolin and Gerbil as part of the larger system. See documentation below:

- [Full Documentation](https://docs.pangolin.net/manage/sites/understanding-sites)

### Install via APT (Debian/Ubuntu)

```bash
curl -fsSL https://repo.dev.fosrl.io/apt/public.key | sudo gpg --dearmor -o /usr/share/keyrings/newt-archive-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/newt-archive-keyring.gpg] https://repo.dev.fosrl.io/apt stable main" | sudo tee /etc/apt/sources.list.d/newt.list
sudo apt update && sudo apt install newt
```

## Key Functions
@@ -46,12 +46,11 @@ func startAuthDaemon(ctx context.Context) error {

    // Create auth daemon server
    cfg := authdaemon.Config{
        DisableHTTPS:           true,                   // We run without HTTP server in newt
        PresharedKey:           "this-key-is-not-used", // Not used in embedded mode, but set to non-empty to satisfy validation
        PrincipalsFilePath:     principalsFile,
        CACertPath:             caCertPath,
        Force:                  true,
        GenerateRandomPassword: authDaemonGenerateRandomPassword,
        DisableHTTPS:       true,                   // We run without HTTP server in newt
        PresharedKey:       "this-key-is-not-used", // Not used in embedded mode, but set to non-empty to satisfy validation
        PrincipalsFilePath: principalsFile,
        CACertPath:         caCertPath,
        Force:              true,
    }

    srv, err := authdaemon.NewServer(cfg)
@@ -73,6 +72,8 @@ func startAuthDaemon(ctx context.Context) error {
    return nil
}


// runPrincipalsCmd executes the principals subcommand logic
func runPrincipalsCmd(args []string) {
    opts := struct {
@@ -147,4 +148,4 @@ Example:
  newt principals --username alice

`, defaultPrincipalsPath)
}
}
@@ -7,8 +7,8 @@ import (
// ProcessConnection runs the same logic as POST /connection: CA cert, user create/reconcile, principals.
// Use this when DisableHTTPS is true (e.g. embedded in Newt) instead of calling the API.
func (s *Server) ProcessConnection(req ConnectionRequest) {
    logger.Info("connection: niceId=%q username=%q metadata.sudoMode=%q metadata.sudoCommands=%v metadata.homedir=%v metadata.groups=%v",
        req.NiceId, req.Username, req.Metadata.SudoMode, req.Metadata.SudoCommands, req.Metadata.Homedir, req.Metadata.Groups)
    logger.Info("connection: niceId=%q username=%q metadata.sudo=%v metadata.homedir=%v",
        req.NiceId, req.Username, req.Metadata.Sudo, req.Metadata.Homedir)

    cfg := &s.cfg
    if cfg.CACertPath != "" {
@@ -16,7 +16,7 @@ func (s *Server) ProcessConnection(req ConnectionRequest) {
            logger.Warn("auth-daemon: write CA cert: %v", err)
        }
    }
    if err := ensureUser(req.Username, req.Metadata, s.cfg.GenerateRandomPassword); err != nil {
    if err := ensureUser(req.Username, req.Metadata); err != nil {
        logger.Warn("auth-daemon: ensure user: %v", err)
    }
    if cfg.PrincipalsFilePath != "" {
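The comment above describes the embedded path: when DisableHTTPS is true, Newt calls ProcessConnection in-process instead of POSTing to the API. A minimal sketch of that call path follows, assuming the import path and all concrete values (paths, site id, username), none of which are taken from this diff:

```go
package main

import (
	"log"

	"github.com/fosrl/newt/authdaemon" // assumed import path; not shown in the diff
)

func main() {
	srv, err := authdaemon.NewServer(authdaemon.Config{
		DisableHTTPS:       true, // embedded mode: no HTTPS listener
		PresharedKey:       "unused-in-embedded-mode",
		PrincipalsFilePath: "/etc/newt/principals.json", // placeholder path
		CACertPath:         "/etc/ssh/ca.pem",           // placeholder path
		Force:              true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// Same effect as handling POST /connection, but invoked in-process.
	srv.ProcessConnection(authdaemon.ConnectionRequest{
		NiceId:   "site-1",
		Username: "alice",
		Metadata: authdaemon.ConnectionMetadata{Sudo: true, Homedir: true},
	})
}
```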
@@ -4,8 +4,6 @@ package authdaemon

import (
    "bufio"
    "crypto/rand"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "os"
@@ -124,73 +122,8 @@ func sudoGroup() string {
    return "sudo"
}
// setRandomPassword generates a random password and sets it for username via chpasswd.
// Used when GenerateRandomPassword is true so that SSH configured with "PermitEmptyPasswords no" can still accept the user.
func setRandomPassword(username string) error {
|
||||
b := make([]byte, 16)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return fmt.Errorf("generate password: %w", err)
|
||||
}
|
||||
password := hex.EncodeToString(b)
|
||||
cmd := exec.Command("chpasswd")
|
||||
cmd.Stdin = strings.NewReader(username + ":" + password)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("chpasswd: %w (output: %s)", err, string(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const skelDir = "/etc/skel"
|
||||
|
||||
// copySkelInto copies files from srcDir (e.g. /etc/skel) into dstDir (e.g. user's home).
|
||||
// Only creates files that don't already exist. All created paths are chowned to uid:gid.
|
||||
func copySkelInto(srcDir, dstDir string, uid, gid int) {
|
||||
entries, err := os.ReadDir(srcDir)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
logger.Warn("auth-daemon: read %s: %v", srcDir, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
for _, e := range entries {
|
||||
name := e.Name()
|
||||
src := filepath.Join(srcDir, name)
|
||||
dst := filepath.Join(dstDir, name)
|
||||
if e.IsDir() {
|
||||
if st, err := os.Stat(dst); err == nil && st.IsDir() {
|
||||
copySkelInto(src, dst, uid, gid)
|
||||
continue
|
||||
}
|
||||
if err := os.MkdirAll(dst, 0755); err != nil {
|
||||
logger.Warn("auth-daemon: mkdir %s: %v", dst, err)
|
||||
continue
|
||||
}
|
||||
if err := os.Chown(dst, uid, gid); err != nil {
|
||||
logger.Warn("auth-daemon: chown %s: %v", dst, err)
|
||||
}
|
||||
copySkelInto(src, dst, uid, gid)
|
||||
continue
|
||||
}
|
||||
if _, err := os.Stat(dst); err == nil {
|
||||
continue
|
||||
}
|
||||
data, err := os.ReadFile(src)
|
||||
if err != nil {
|
||||
logger.Warn("auth-daemon: read %s: %v", src, err)
|
||||
continue
|
||||
}
|
||||
if err := os.WriteFile(dst, data, 0644); err != nil {
|
||||
logger.Warn("auth-daemon: write %s: %v", dst, err)
|
||||
continue
|
||||
}
|
||||
if err := os.Chown(dst, uid, gid); err != nil {
|
||||
logger.Warn("auth-daemon: chown %s: %v", dst, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ensureUser creates the system user if missing, or reconciles sudo and homedir to match meta.
|
||||
func ensureUser(username string, meta ConnectionMetadata, generateRandomPassword bool) error {
|
||||
func ensureUser(username string, meta ConnectionMetadata) error {
|
||||
if username == "" {
|
||||
return nil
|
||||
}
|
||||
@@ -199,49 +132,12 @@ func ensureUser(username string, meta ConnectionMetadata, generateRandomPassword
|
||||
if _, ok := err.(user.UnknownUserError); !ok {
|
||||
return fmt.Errorf("lookup user %s: %w", username, err)
|
||||
}
|
||||
return createUser(username, meta, generateRandomPassword)
|
||||
return createUser(username, meta)
|
||||
}
|
||||
return reconcileUser(u, meta)
|
||||
}
|
||||
|
||||
// desiredGroups returns the exact list of supplementary groups the user should have:
|
||||
// meta.Groups plus the sudo group when meta.SudoMode is "full" (deduped).
|
||||
func desiredGroups(meta ConnectionMetadata) []string {
|
||||
seen := make(map[string]struct{})
|
||||
var out []string
|
||||
for _, g := range meta.Groups {
|
||||
g = strings.TrimSpace(g)
|
||||
if g == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[g]; ok {
|
||||
continue
|
||||
}
|
||||
seen[g] = struct{}{}
|
||||
out = append(out, g)
|
||||
}
|
||||
if meta.SudoMode == "full" {
|
||||
sg := sudoGroup()
|
||||
if _, ok := seen[sg]; !ok {
|
||||
out = append(out, sg)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// setUserGroups sets the user's supplementary groups to exactly groups (local mirrors metadata).
|
||||
// When groups is empty, clears all supplementary groups (usermod -G "").
|
||||
func setUserGroups(username string, groups []string) {
|
||||
list := strings.Join(groups, ",")
|
||||
cmd := exec.Command("usermod", "-G", list, username)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: usermod -G %s: %v (output: %s)", list, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: set %s supplementary groups to %s", username, list)
|
||||
}
|
||||
}
|
||||
|
||||
func createUser(username string, meta ConnectionMetadata, generateRandomPassword bool) error {
|
||||
func createUser(username string, meta ConnectionMetadata) error {
|
||||
args := []string{"-s", "/bin/bash"}
|
||||
if meta.Homedir {
|
||||
args = append(args, "-m")
|
||||
@@ -254,143 +150,75 @@ func createUser(username string, meta ConnectionMetadata, generateRandomPassword
|
||||
return fmt.Errorf("useradd %s: %w (output: %s)", username, err, string(out))
|
||||
}
|
||||
logger.Info("auth-daemon: created user %s (homedir=%v)", username, meta.Homedir)
|
||||
if generateRandomPassword {
|
||||
if err := setRandomPassword(username); err != nil {
|
||||
logger.Warn("auth-daemon: set random password for %s: %v", username, err)
|
||||
if meta.Sudo {
|
||||
group := sudoGroup()
|
||||
cmd := exec.Command("usermod", "-aG", group, username)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, username, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: set random password for %s (PermitEmptyPasswords no)", username)
|
||||
logger.Info("auth-daemon: added %s to %s", username, group)
|
||||
}
|
||||
}
|
||||
if meta.Homedir {
|
||||
if u, err := user.Lookup(username); err == nil && u.HomeDir != "" {
|
||||
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
||||
copySkelInto(skelDir, u.HomeDir, uid, gid)
|
||||
}
|
||||
}
|
||||
setUserGroups(username, desiredGroups(meta))
|
||||
switch meta.SudoMode {
|
||||
case "full":
|
||||
if err := configurePasswordlessSudo(username); err != nil {
|
||||
logger.Warn("auth-daemon: configure passwordless sudo for %s: %v", username, err)
|
||||
}
|
||||
case "commands":
|
||||
if len(meta.SudoCommands) > 0 {
|
||||
if err := configureSudoCommands(username, meta.SudoCommands); err != nil {
|
||||
logger.Warn("auth-daemon: configure sudo commands for %s: %v", username, err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
removeSudoers(username)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const sudoersFilePrefix = "90-pangolin-"
|
||||
|
||||
func sudoersPath(username string) string {
|
||||
return filepath.Join("/etc/sudoers.d", sudoersFilePrefix+username)
|
||||
}
|
||||
|
||||
// writeSudoersFile writes content to the user's sudoers.d file and validates with visudo.
|
||||
func writeSudoersFile(username, content string) error {
|
||||
sudoersFile := sudoersPath(username)
|
||||
tmpFile := sudoersFile + ".tmp"
|
||||
if err := os.WriteFile(tmpFile, []byte(content), 0440); err != nil {
|
||||
return fmt.Errorf("write temp sudoers file: %w", err)
|
||||
}
|
||||
cmd := exec.Command("visudo", "-c", "-f", tmpFile)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
os.Remove(tmpFile)
|
||||
return fmt.Errorf("visudo validation failed: %w (output: %s)", err, string(out))
|
||||
}
|
||||
if err := os.Rename(tmpFile, sudoersFile); err != nil {
|
||||
os.Remove(tmpFile)
|
||||
return fmt.Errorf("move sudoers file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// configurePasswordlessSudo creates a sudoers.d file to allow passwordless sudo for the user.
|
||||
func configurePasswordlessSudo(username string) error {
|
||||
content := fmt.Sprintf("# Created by Pangolin auth-daemon\n%s ALL=(ALL) NOPASSWD:ALL\n", username)
|
||||
if err := writeSudoersFile(username, content); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Info("auth-daemon: configured passwordless sudo for %s", username)
|
||||
return nil
|
||||
}
|
||||
|
||||
// configureSudoCommands creates a sudoers.d file allowing only the listed commands (NOPASSWD).
|
||||
// Each command should be a full path (e.g. /usr/bin/systemctl).
|
||||
func configureSudoCommands(username string, commands []string) error {
|
||||
var b strings.Builder
|
||||
b.WriteString("# Created by Pangolin auth-daemon (restricted commands)\n")
|
||||
n := 0
|
||||
for _, c := range commands {
|
||||
c = strings.TrimSpace(c)
|
||||
if c == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(&b, "%s ALL=(ALL) NOPASSWD: %s\n", username, c)
|
||||
n++
|
||||
}
|
||||
if n == 0 {
|
||||
return fmt.Errorf("no valid sudo commands")
|
||||
}
|
||||
if err := writeSudoersFile(username, b.String()); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Info("auth-daemon: configured restricted sudo for %s (%d commands)", username, len(commands))
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeSudoers removes the sudoers.d file for the user.
|
||||
func removeSudoers(username string) {
|
||||
sudoersFile := sudoersPath(username)
|
||||
if err := os.Remove(sudoersFile); err != nil && !os.IsNotExist(err) {
|
||||
logger.Warn("auth-daemon: remove sudoers for %s: %v", username, err)
|
||||
} else if err == nil {
|
||||
logger.Info("auth-daemon: removed sudoers for %s", username)
|
||||
}
|
||||
}
|
||||
|
||||
func mustAtoi(s string) int {
|
||||
n, _ := strconv.Atoi(s)
|
||||
return n
|
||||
}
|
||||
|
||||
func reconcileUser(u *user.User, meta ConnectionMetadata) error {
|
||||
setUserGroups(u.Username, desiredGroups(meta))
|
||||
switch meta.SudoMode {
|
||||
case "full":
|
||||
if err := configurePasswordlessSudo(u.Username); err != nil {
|
||||
logger.Warn("auth-daemon: configure passwordless sudo for %s: %v", u.Username, err)
|
||||
}
|
||||
case "commands":
|
||||
if len(meta.SudoCommands) > 0 {
|
||||
if err := configureSudoCommands(u.Username, meta.SudoCommands); err != nil {
|
||||
logger.Warn("auth-daemon: configure sudo commands for %s: %v", u.Username, err)
|
||||
}
|
||||
group := sudoGroup()
|
||||
inGroup, err := userInGroup(u.Username, group)
|
||||
if err != nil {
|
||||
logger.Warn("auth-daemon: check group %s: %v", group, err)
|
||||
inGroup = false
|
||||
}
|
||||
if meta.Sudo && !inGroup {
|
||||
cmd := exec.Command("usermod", "-aG", group, u.Username)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, u.Username, err, string(out))
|
||||
} else {
|
||||
removeSudoers(u.Username)
|
||||
logger.Info("auth-daemon: added %s to %s", u.Username, group)
|
||||
}
|
||||
} else if !meta.Sudo && inGroup {
|
||||
cmd := exec.Command("gpasswd", "-d", u.Username, group)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: gpasswd -d %s %s: %v (output: %s)", u.Username, group, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: removed %s from %s", u.Username, group)
|
||||
}
|
||||
default:
|
||||
removeSudoers(u.Username)
|
||||
}
|
||||
if meta.Homedir && u.HomeDir != "" {
|
||||
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
||||
if st, err := os.Stat(u.HomeDir); err != nil || !st.IsDir() {
|
||||
if err := os.MkdirAll(u.HomeDir, 0755); err != nil {
|
||||
logger.Warn("auth-daemon: mkdir %s: %v", u.HomeDir, err)
|
||||
} else {
|
||||
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
||||
_ = os.Chown(u.HomeDir, uid, gid)
|
||||
copySkelInto(skelDir, u.HomeDir, uid, gid)
|
||||
logger.Info("auth-daemon: created home %s for %s", u.HomeDir, u.Username)
|
||||
}
|
||||
} else {
|
||||
// Ensure .bashrc etc. exist (e.g. home existed but was empty or skel was minimal)
|
||||
copySkelInto(skelDir, u.HomeDir, uid, gid)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func userInGroup(username, groupName string) (bool, error) {
|
||||
// getent group wheel returns "wheel:x:10:user1,user2"
|
||||
cmd := exec.Command("getent", "group", groupName)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
parts := strings.SplitN(strings.TrimSpace(string(out)), ":", 4)
|
||||
if len(parts) < 4 {
|
||||
return false, nil
|
||||
}
|
||||
members := strings.Split(parts[3], ",")
|
||||
for _, m := range members {
|
||||
if strings.TrimSpace(m) == username {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ func writeCACertIfNotExists(path, contents string, force bool) error {
}

// ensureUser returns an error on non-Linux.
func ensureUser(username string, meta ConnectionMetadata, generateRandomPassword bool) error {
func ensureUser(username string, meta ConnectionMetadata) error {
    return errLinuxOnly
}
@@ -13,10 +13,8 @@ func (s *Server) registerRoutes() {

// ConnectionMetadata is the metadata object in POST /connection.
type ConnectionMetadata struct {
    SudoMode     string   `json:"sudoMode"`     // "none" | "full" | "commands"
    SudoCommands []string `json:"sudoCommands"` // used when sudoMode is "commands"
    Homedir      bool     `json:"homedir"`
    Groups       []string `json:"groups"` // system groups to add the user to
    Sudo    bool `json:"sudo"`
    Homedir bool `json:"homedir"`
}

// ConnectionRequest is the JSON body for POST /connection.
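To make the schema change concrete, here is a small sketch of the wire format implied by the struct tags above; the old field names are quoted from the removed struct, and everything else is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of the new ConnectionMetadata shape shown above, for illustration only.
type connectionMetadata struct {
	Sudo    bool `json:"sudo"`
	Homedir bool `json:"homedir"`
}

func main() {
	b, _ := json.Marshal(connectionMetadata{Sudo: true, Homedir: true})
	fmt.Println(string(b)) // {"sudo":true,"homedir":true}
	// Under the removed schema the same object also carried
	// "sudoMode", "sudoCommands" and "groups".
}
```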
@@ -27,9 +27,8 @@ type Config struct {
    Port         int    // Required when DisableHTTPS is false. Listen port for the HTTPS server. No default.
    PresharedKey string // Required when DisableHTTPS is false. HTTP auth (Authorization: Bearer <key> or X-Preshared-Key: <key>). No default.
    CACertPath   string // Required. Where to write the CA cert (e.g. /etc/ssh/ca.pem). No default.
    Force                  bool   // If true, overwrite existing CA cert (and other items) when content differs. Default false.
    PrincipalsFilePath     string // Required. Path to the principals data file (JSON: username -> array of principals). No default.
    GenerateRandomPassword bool   // If true, set a random password on users when they are provisioned (for SSH PermitEmptyPasswords no).
    Force              bool   // If true, overwrite existing CA cert (and other items) when content differs. Default false.
    PrincipalsFilePath string // Required. Path to the principals data file (JSON: username -> array of principals). No default.
}

type Server struct {
@@ -37,12 +37,11 @@ type WgConfig struct {
}

type Target struct {
    SourcePrefix   string      `json:"sourcePrefix"`
    SourcePrefixes []string    `json:"sourcePrefixes"`
    DestPrefix     string      `json:"destPrefix"`
    RewriteTo      string      `json:"rewriteTo,omitempty"`
    DisableIcmp    bool        `json:"disableIcmp,omitempty"`
    PortRange      []PortRange `json:"portRange,omitempty"`
    SourcePrefix string      `json:"sourcePrefix"`
    DestPrefix   string      `json:"destPrefix"`
    RewriteTo    string      `json:"rewriteTo,omitempty"`
    DisableIcmp  bool        `json:"disableIcmp,omitempty"`
    PortRange    []PortRange `json:"portRange,omitempty"`
}

type PortRange struct {
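A sketch of one proxy target under the post-change schema (single sourcePrefix, no sourcePrefixes array), meant to sit alongside the types above: the CIDRs, rewrite address, and protocol string are placeholders, while the Min/Max/Protocol field names follow the PortRange usages later in this diff.

```go
// exampleTarget returns an illustrative Target as the sync/add handlers below expect it.
// Values are placeholders, not taken from the repository.
func exampleTarget() Target {
	return Target{
		SourcePrefix: "100.89.128.0/24", // placeholder CIDR; parsed with netip.ParsePrefix by the handlers
		DestPrefix:   "10.0.0.0/24",     // placeholder CIDR
		RewriteTo:    "10.0.0.10",       // placeholder rewrite address
		PortRange:    []PortRange{{Min: 443, Max: 443, Protocol: "tcp"}}, // "tcp" is an assumed protocol value
	}
}
```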
@@ -173,7 +172,6 @@ func NewWireGuardService(interfaceName string, port uint16, mtu int, host string
|
||||
wsClient.RegisterHandler("newt/wg/targets/add", service.handleAddTarget)
|
||||
wsClient.RegisterHandler("newt/wg/targets/remove", service.handleRemoveTarget)
|
||||
wsClient.RegisterHandler("newt/wg/targets/update", service.handleUpdateTarget)
|
||||
wsClient.RegisterHandler("newt/wg/sync", service.handleSyncConfig)
|
||||
|
||||
return service, nil
|
||||
}
|
||||
@@ -279,7 +277,7 @@ func (s *WireGuardService) StartHolepunch(publicKey string, endpoint string, rel
|
||||
}
|
||||
|
||||
if relayPort == 0 {
|
||||
relayPort = 21820
|
||||
relayPort = 21820
|
||||
}
|
||||
|
||||
// Convert websocket.ExitNode to holepunch.ExitNode
|
||||
@@ -494,183 +492,6 @@ func (s *WireGuardService) handleConfig(msg websocket.WSMessage) {
|
||||
logger.Info("Client connectivity setup. Ready to accept connections from clients!")
|
||||
}
|
||||
|
||||
// SyncConfig represents the configuration sent from server for syncing
|
||||
type SyncConfig struct {
|
||||
Targets []Target `json:"targets"`
|
||||
Peers []Peer `json:"peers"`
|
||||
}
|
||||
|
||||
func (s *WireGuardService) handleSyncConfig(msg websocket.WSMessage) {
|
||||
var syncConfig SyncConfig
|
||||
|
||||
logger.Debug("Received sync message: %v", msg)
|
||||
logger.Info("Received sync configuration from remote server")
|
||||
|
||||
jsonData, err := json.Marshal(msg.Data)
|
||||
if err != nil {
|
||||
logger.Error("Error marshaling sync data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(jsonData, &syncConfig); err != nil {
|
||||
logger.Error("Error unmarshaling sync data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Sync peers
|
||||
if err := s.syncPeers(syncConfig.Peers); err != nil {
|
||||
logger.Error("Failed to sync peers: %v", err)
|
||||
}
|
||||
|
||||
// Sync targets
|
||||
if err := s.syncTargets(syncConfig.Targets); err != nil {
|
||||
logger.Error("Failed to sync targets: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// syncPeers synchronizes the current peers with the desired state
|
||||
// It removes peers not in the desired list and adds missing ones
|
||||
func (s *WireGuardService) syncPeers(desiredPeers []Peer) error {
|
||||
if s.device == nil {
|
||||
return fmt.Errorf("WireGuard device is not initialized")
|
||||
}
|
||||
|
||||
// Get current peers from the device
|
||||
currentConfig, err := s.device.IpcGet()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current device config: %v", err)
|
||||
}
|
||||
|
||||
// Parse current peer public keys
|
||||
lines := strings.Split(currentConfig, "\n")
|
||||
currentPeerKeys := make(map[string]bool)
|
||||
for _, line := range lines {
|
||||
if strings.HasPrefix(line, "public_key=") {
|
||||
pubKey := strings.TrimPrefix(line, "public_key=")
|
||||
currentPeerKeys[pubKey] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Build a map of desired peers by their public key (normalized)
|
||||
desiredPeerMap := make(map[string]Peer)
|
||||
for _, peer := range desiredPeers {
|
||||
// Normalize the public key for comparison
|
||||
pubKey, err := wgtypes.ParseKey(peer.PublicKey)
|
||||
if err != nil {
|
||||
logger.Warn("Invalid public key in desired peers: %s", peer.PublicKey)
|
||||
continue
|
||||
}
|
||||
normalizedKey := util.FixKey(pubKey.String())
|
||||
desiredPeerMap[normalizedKey] = peer
|
||||
}
|
||||
|
||||
// Remove peers that are not in the desired list
|
||||
for currentKey := range currentPeerKeys {
|
||||
if _, exists := desiredPeerMap[currentKey]; !exists {
|
||||
// Parse the key back to get the original format for removal
|
||||
removeConfig := fmt.Sprintf("public_key=%s\nremove=true", currentKey)
|
||||
if err := s.device.IpcSet(removeConfig); err != nil {
|
||||
logger.Warn("Failed to remove peer %s during sync: %v", currentKey, err)
|
||||
} else {
|
||||
logger.Info("Removed peer %s during sync", currentKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add peers that are missing
|
||||
for normalizedKey, peer := range desiredPeerMap {
|
||||
if _, exists := currentPeerKeys[normalizedKey]; !exists {
|
||||
if err := s.addPeerToDevice(peer); err != nil {
|
||||
logger.Warn("Failed to add peer %s during sync: %v", peer.PublicKey, err)
|
||||
} else {
|
||||
logger.Info("Added peer %s during sync", peer.PublicKey)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncTargets synchronizes the current targets with the desired state
|
||||
// It removes targets not in the desired list and adds missing ones
|
||||
func (s *WireGuardService) syncTargets(desiredTargets []Target) error {
|
||||
if s.tnet == nil {
|
||||
// Native interface mode - proxy features not available, skip silently
|
||||
logger.Debug("Skipping target sync - using native interface (no proxy support)")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get current rules from the proxy handler
|
||||
currentRules := s.tnet.GetProxySubnetRules()
|
||||
|
||||
// Build a map of current rules by source+dest prefix
|
||||
type ruleKey struct {
|
||||
sourcePrefix string
|
||||
destPrefix string
|
||||
}
|
||||
currentRuleMap := make(map[ruleKey]bool)
|
||||
for _, rule := range currentRules {
|
||||
key := ruleKey{
|
||||
sourcePrefix: rule.SourcePrefix.String(),
|
||||
destPrefix: rule.DestPrefix.String(),
|
||||
}
|
||||
currentRuleMap[key] = true
|
||||
}
|
||||
|
||||
// Build a map of desired targets
|
||||
desiredTargetMap := make(map[ruleKey]Target)
|
||||
for _, target := range desiredTargets {
|
||||
key := ruleKey{
|
||||
sourcePrefix: target.SourcePrefix,
|
||||
destPrefix: target.DestPrefix,
|
||||
}
|
||||
desiredTargetMap[key] = target
|
||||
}
|
||||
|
||||
// Remove targets that are not in the desired list
|
||||
for _, rule := range currentRules {
|
||||
key := ruleKey{
|
||||
sourcePrefix: rule.SourcePrefix.String(),
|
||||
destPrefix: rule.DestPrefix.String(),
|
||||
}
|
||||
if _, exists := desiredTargetMap[key]; !exists {
|
||||
s.tnet.RemoveProxySubnetRule(rule.SourcePrefix, rule.DestPrefix)
|
||||
logger.Info("Removed target %s -> %s during sync", rule.SourcePrefix.String(), rule.DestPrefix.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Add targets that are missing
|
||||
for key, target := range desiredTargetMap {
|
||||
if _, exists := currentRuleMap[key]; !exists {
|
||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||
if err != nil {
|
||||
logger.Warn("Invalid source prefix %s during sync: %v", target.SourcePrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||
if err != nil {
|
||||
logger.Warn("Invalid dest prefix %s during sync: %v", target.DestPrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
var portRanges []netstack2.PortRange
|
||||
for _, pr := range target.PortRange {
|
||||
portRanges = append(portRanges, netstack2.PortRange{
|
||||
Min: pr.Min,
|
||||
Max: pr.Max,
|
||||
Protocol: pr.Protocol,
|
||||
})
|
||||
}
|
||||
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
logger.Info("Added target %s -> %s during sync", target.SourcePrefix, target.DestPrefix)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *WireGuardService) ensureWireguardInterface(wgconfig WgConfig) error {
|
||||
s.mu.Lock()
|
||||
|
||||
@@ -874,19 +695,6 @@ func (s *WireGuardService) ensureWireguardPeers(peers []Peer) error {
    return nil
}

// resolveSourcePrefixes returns the effective list of source prefixes for a target,
// supporting both the legacy single SourcePrefix field and the new SourcePrefixes array.
// If SourcePrefixes is non-empty it takes precedence; otherwise SourcePrefix is used.
func resolveSourcePrefixes(target Target) []string {
    if len(target.SourcePrefixes) > 0 {
        return target.SourcePrefixes
    }
    if target.SourcePrefix != "" {
        return []string{target.SourcePrefix}
    }
    return nil
}
|
||||
|
||||
func (s *WireGuardService) ensureTargets(targets []Target) error {
|
||||
if s.tnet == nil {
|
||||
// Native interface mode - proxy features not available, skip silently
|
||||
@@ -895,6 +703,11 @@ func (s *WireGuardService) ensureTargets(targets []Target) error {
|
||||
}
|
||||
|
||||
for _, target := range targets {
|
||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||
}
|
||||
|
||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid CIDR %s: %v", target.DestPrefix, err)
|
||||
@@ -909,14 +722,9 @@ func (s *WireGuardService) ensureTargets(targets []Target) error {
|
||||
})
|
||||
}
|
||||
|
||||
for _, sp := range resolveSourcePrefixes(target) {
|
||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid CIDR %s: %v", sp, err)
|
||||
}
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", sp, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||
}
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
|
||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", target.SourcePrefix, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -1235,7 +1043,7 @@ func (s *WireGuardService) processPeerBandwidth(publicKey string, rxBytes, txByt
|
||||
BytesOut: bytesOutMB,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1286,6 +1094,12 @@ func (s *WireGuardService) handleAddTarget(msg websocket.WSMessage) {
|
||||
|
||||
// Process all targets
|
||||
for _, target := range targets {
|
||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||
@@ -1295,21 +1109,15 @@ func (s *WireGuardService) handleAddTarget(msg websocket.WSMessage) {
|
||||
var portRanges []netstack2.PortRange
|
||||
for _, pr := range target.PortRange {
|
||||
portRanges = append(portRanges, netstack2.PortRange{
|
||||
Min: pr.Min,
|
||||
Max: pr.Max,
|
||||
Protocol: pr.Protocol,
|
||||
Min: pr.Min,
|
||||
Max: pr.Max,
|
||||
Protocol: pr.Protocol,
|
||||
})
|
||||
}
|
||||
|
||||
for _, sp := range resolveSourcePrefixes(target) {
|
||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
||||
continue
|
||||
}
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", sp, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||
}
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
|
||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", target.SourcePrefix, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1338,21 +1146,21 @@ func (s *WireGuardService) handleRemoveTarget(msg websocket.WSMessage) {
|
||||
|
||||
// Process all targets
|
||||
for _, target := range targets {
|
||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, sp := range resolveSourcePrefixes(target) {
|
||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
||||
continue
|
||||
}
|
||||
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
||||
logger.Info("Removed target subnet %s with destination %s", sp, target.DestPrefix)
|
||||
}
|
||||
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
||||
|
||||
logger.Info("Removed target subnet %s with destination %s", target.SourcePrefix, target.DestPrefix)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1386,24 +1194,30 @@ func (s *WireGuardService) handleUpdateTarget(msg websocket.WSMessage) {
|
||||
|
||||
// Process all update requests
|
||||
for _, target := range requests.OldTargets {
|
||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, sp := range resolveSourcePrefixes(target) {
|
||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
||||
continue
|
||||
}
|
||||
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
||||
logger.Info("Removed target subnet %s with destination %s", sp, target.DestPrefix)
|
||||
}
|
||||
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
||||
logger.Info("Removed target subnet %s with destination %s", target.SourcePrefix, target.DestPrefix)
|
||||
}
|
||||
|
||||
for _, target := range requests.NewTargets {
|
||||
// Now add the new target
|
||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||
continue
|
||||
}
|
||||
|
||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||
@@ -1413,21 +1227,14 @@ func (s *WireGuardService) handleUpdateTarget(msg websocket.WSMessage) {
|
||||
var portRanges []netstack2.PortRange
|
||||
for _, pr := range target.PortRange {
|
||||
portRanges = append(portRanges, netstack2.PortRange{
|
||||
Min: pr.Min,
|
||||
Max: pr.Max,
|
||||
Protocol: pr.Protocol,
|
||||
Min: pr.Min,
|
||||
Max: pr.Max,
|
||||
Protocol: pr.Protocol,
|
||||
})
|
||||
}
|
||||
|
||||
for _, sp := range resolveSourcePrefixes(target) {
|
||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
||||
if err != nil {
|
||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
||||
continue
|
||||
}
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", sp, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||
}
|
||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", target.SourcePrefix, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
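The three target handlers above now fan out over resolveSourcePrefixes(target) instead of reading target.SourcePrefix directly. The helper itself is not shown in this diff; the sketch below is only a hypothetical reading of its contract (it must return CIDR strings that netip.ParsePrefix accepts), and the type name and fallback behaviour are assumptions, not the repository's implementation.

// Hypothetical sketch only: resolveSourcePrefixes is assumed to normalize a target into
// the list of source CIDR strings its rule applies to. TargetConfig and the "0.0.0.0/0"
// fallback are illustrative assumptions.
func resolveSourcePrefixes(target TargetConfig) []string {
	if target.SourcePrefix != "" {
		return []string{target.SourcePrefix} // single explicit source prefix
	}
	// Assumed fallback: no explicit source means "match any IPv4 source".
	return []string{"0.0.0.0/0"}
}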
@@ -35,7 +35,7 @@
|
||||
inherit version;
|
||||
src = pkgs.nix-gitignore.gitignoreSource [ ] ./.;
|
||||
|
||||
vendorHash = "sha256-kmQM8Yy5TuOiNpMpUme/2gfE+vrhUK+0AphN+p71wGs=";
|
||||
vendorHash = "sha256-Sib6AUCpMgxlMpTc2Esvs+UU0yduVOxWUgT44FHAI+k=";
|
||||
|
||||
nativeInstallCheckInputs = [ pkgs.versionCheckHook ];
|
||||
|
||||
|
||||
49 go.mod
@@ -1,30 +1,29 @@
|
||||
module github.com/fosrl/newt
|
||||
|
||||
go 1.25.0
|
||||
go 1.25
|
||||
|
||||
require (
|
||||
github.com/docker/docker v28.5.2+incompatible
|
||||
github.com/gaissmai/bart v0.26.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/vishvananda/netlink v1.3.1
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0
|
||||
go.opentelemetry.io/otel v1.41.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.63.0
|
||||
go.opentelemetry.io/otel/metric v1.41.0
|
||||
go.opentelemetry.io/otel/sdk v1.41.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.41.0
|
||||
golang.org/x/crypto v0.48.0
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0
|
||||
go.opentelemetry.io/otel v1.39.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0
|
||||
go.opentelemetry.io/otel/metric v1.39.0
|
||||
go.opentelemetry.io/otel/sdk v1.39.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0
|
||||
golang.org/x/crypto v0.46.0
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
||||
golang.org/x/net v0.51.0
|
||||
golang.org/x/sys v0.41.0
|
||||
golang.org/x/net v0.48.0
|
||||
golang.org/x/sys v0.39.0
|
||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/grpc v1.79.1
|
||||
google.golang.org/grpc v1.77.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c
|
||||
software.sslmate.com/src/go-pkcs12 v0.7.0
|
||||
@@ -45,7 +44,7 @@ require (
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
@@ -55,23 +54,23 @@ require (
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.67.5 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/prometheus/otlptranslator v1.0.0 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.41.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.39.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/sync v0.19.0 // indirect
|
||||
golang.org/x/text v0.34.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.41.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
)
|
||||
|
||||
94 go.sum
@@ -26,8 +26,6 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/gaissmai/bart v0.26.0 h1:xOZ57E9hJLBiQaSyeZa9wgWhGuzfGACgqp4BE77OkO0=
|
||||
github.com/gaissmai/bart v0.26.0/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -43,8 +41,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
@@ -77,8 +75,8 @@ github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
|
||||
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
|
||||
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
|
||||
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
|
||||
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
||||
@@ -95,56 +93,56 @@ github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zd
|
||||
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0 h1:PnV4kVnw0zOmwwFkAzCN5O07fw1YOIQor120zrh0AVo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0/go.mod h1:ofAwF4uinaf8SXdVzzbL4OsxJ3VfeEg3f/F6CeF49/Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0 h1:JruBNmrPELWjR+PU3fsQBFQRYtsMLQ/zPfbvwDz9I/w=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0/go.mod h1:vwNrfL6w1uAE3qX48KFii2Qoqf+NEDP5wNjus+RHz8Y=
|
||||
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
|
||||
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 h1:VO3BL6OZXRQ1yQc8W6EVfJzINeJ35BkiHx4MYfoQf44=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0/go.mod h1:qRDnJ2nv3CQXMK2HUd9K9VtvedsPAce3S+/4LZHjX/s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 h1:mq/Qcf28TWz719lE3/hMB4KkyDuLJIvgJnFGcd0kEUI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0/go.mod h1:yk5LXEYhsL2htyDNJbEq7fWzNEigeEdV5xBF/Y+kAv0=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0 h1:/+/+UjlXjFcdDlXxKL1PouzX8Z2Vl0OxolRKeBEgYDw=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0/go.mod h1:Ldm/PDuzY2DP7IypudopCR3OCOW42NJlN9+mNEroevo=
|
||||
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
|
||||
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 h1:cEf8jF6WbuGQWUVcqgyWtTR0kOOAWY1DYZ+UhvdmQPw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0/go.mod h1:k1lzV5n5U3HkGvTCJHraTAGJ7MqsgL1wrGwTj1Isfiw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.63.0 h1:OLo1FNb0pBZykLqbKRZolKtGZd0Waqlr240YdMEnhhg=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.63.0/go.mod h1:8yeQAdhrK5xsWuFehO13Dk/Xb9FuhZoVpJfpoNCfJnw=
|
||||
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
|
||||
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
|
||||
go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8=
|
||||
go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y=
|
||||
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
|
||||
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0 h1:cCyZS4dr67d30uDyh8etKM2QyDsQ4zC9ds3bdbrVoD0=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0/go.mod h1:iivMuj3xpR2DkUrUya3TPS/Z9h3dz7h01GxU+fQBRNg=
|
||||
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
|
||||
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
|
||||
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
|
||||
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
||||
golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo=
|
||||
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb h1:whnFRlWMcXI9d+ZbWg+4sHnLp52d5yiIPUxMBSt4X9A=
|
||||
@@ -155,14 +153,14 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
|
||||
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
|
||||
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
||||
@@ -521,82 +521,3 @@ func (m *Monitor) DisableTarget(id int) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTargetIDs returns a slice of all current target IDs
|
||||
func (m *Monitor) GetTargetIDs() []int {
|
||||
m.mutex.RLock()
|
||||
defer m.mutex.RUnlock()
|
||||
|
||||
ids := make([]int, 0, len(m.targets))
|
||||
for id := range m.targets {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// SyncTargets synchronizes the current targets to match the desired set.
|
||||
// It removes targets not in the desired set and adds targets that are missing.
|
||||
func (m *Monitor) SyncTargets(desiredConfigs []Config) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
logger.Info("Syncing health check targets: %d desired targets", len(desiredConfigs))
|
||||
|
||||
// Build a set of desired target IDs
|
||||
desiredIDs := make(map[int]Config)
|
||||
for _, config := range desiredConfigs {
|
||||
desiredIDs[config.ID] = config
|
||||
}
|
||||
|
||||
// Find targets to remove (exist but not in desired set)
|
||||
var toRemove []int
|
||||
for id := range m.targets {
|
||||
if _, exists := desiredIDs[id]; !exists {
|
||||
toRemove = append(toRemove, id)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove targets that are not in the desired set
|
||||
for _, id := range toRemove {
|
||||
logger.Info("Sync: removing health check target %d", id)
|
||||
if target, exists := m.targets[id]; exists {
|
||||
target.cancel()
|
||||
delete(m.targets, id)
|
||||
}
|
||||
}
|
||||
|
||||
// Add or update targets from the desired set
|
||||
var addedCount, updatedCount int
|
||||
for id, config := range desiredIDs {
|
||||
if existing, exists := m.targets[id]; exists {
|
||||
// Target exists - check if config changed and update if needed
|
||||
// For now, we'll replace it to ensure config is up to date
|
||||
logger.Debug("Sync: updating health check target %d", id)
|
||||
existing.cancel()
|
||||
delete(m.targets, id)
|
||||
if err := m.addTargetUnsafe(config); err != nil {
|
||||
logger.Error("Sync: failed to update target %d: %v", id, err)
|
||||
return fmt.Errorf("failed to update target %d: %v", id, err)
|
||||
}
|
||||
updatedCount++
|
||||
} else {
|
||||
// Target doesn't exist - add it
|
||||
logger.Debug("Sync: adding health check target %d", id)
|
||||
if err := m.addTargetUnsafe(config); err != nil {
|
||||
logger.Error("Sync: failed to add target %d: %v", id, err)
|
||||
return fmt.Errorf("failed to add target %d: %v", id, err)
|
||||
}
|
||||
addedCount++
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Sync complete: removed %d, added %d, updated %d targets",
|
||||
len(toRemove), addedCount, updatedCount)
|
||||
|
||||
// Notify callback if any changes were made
|
||||
if (len(toRemove) > 0 || addedCount > 0 || updatedCount > 0) && m.callback != nil {
|
||||
go m.callback(m.getAllTargetsUnsafe())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
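SyncTargets gives the health check monitor a declarative reconcile entry point: pass the full desired set and it removes, adds, or replaces targets to match. A minimal usage sketch, assuming only the healthcheck.Config field visible here (ID); the remaining endpoint/interval fields of the real Config are omitted.

// Minimal sketch: reconcile the running monitor against a desired set pushed by the server.
desired := []healthcheck.Config{
	{ID: 12 /* plus the target's endpoint settings, omitted here */},
	{ID: 27},
}
if err := healthMonitor.SyncTargets(desired); err != nil {
	logger.Error("Failed to sync health check targets: %v", err)
}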
220 main.go
@@ -116,7 +116,6 @@ var (
|
||||
logLevel string
|
||||
interfaceName string
|
||||
port uint16
|
||||
portStr string
|
||||
disableClients bool
|
||||
updownScript string
|
||||
dockerSocket string
|
||||
@@ -137,7 +136,6 @@ var (
|
||||
authDaemonPrincipalsFile string
|
||||
authDaemonCACertPath string
|
||||
authDaemonEnabled bool
|
||||
authDaemonGenerateRandomPassword bool
|
||||
// Build/version (can be overridden via -ldflags "-X main.newtVersion=...")
|
||||
newtVersion = "version_replaceme"
|
||||
|
||||
@@ -212,12 +210,11 @@ func runNewtMain(ctx context.Context) {
|
||||
logLevel = os.Getenv("LOG_LEVEL")
|
||||
updownScript = os.Getenv("UPDOWN_SCRIPT")
|
||||
interfaceName = os.Getenv("INTERFACE")
|
||||
portStr = os.Getenv("PORT")
|
||||
portStr := os.Getenv("PORT")
|
||||
authDaemonKey = os.Getenv("AD_KEY")
|
||||
authDaemonPrincipalsFile = os.Getenv("AD_PRINCIPALS_FILE")
|
||||
authDaemonCACertPath = os.Getenv("AD_CA_CERT_PATH")
|
||||
authDaemonEnabledEnv := os.Getenv("AUTH_DAEMON_ENABLED")
|
||||
authDaemonGenerateRandomPasswordEnv := os.Getenv("AD_GENERATE_RANDOM_PASSWORD")
|
||||
|
||||
// Metrics/observability env mirrors
|
||||
metricsEnabledEnv := os.Getenv("NEWT_METRICS_PROMETHEUS_ENABLED")
|
||||
@@ -347,6 +344,15 @@ func runNewtMain(ctx context.Context) {
|
||||
pingTimeout = 5 * time.Second
|
||||
}
|
||||
|
||||
if portStr != "" {
|
||||
portInt, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
logger.Warn("Failed to parse PORT, choosing a random port")
|
||||
} else {
|
||||
port = uint16(portInt)
|
||||
}
|
||||
}
|
||||
|
||||
if dockerEnforceNetworkValidation == "" {
|
||||
flag.StringVar(&dockerEnforceNetworkValidation, "docker-enforce-network-validation", "false", "Enforce validation of container on newt network (true or false)")
|
||||
}
|
||||
@@ -414,13 +420,6 @@ func runNewtMain(ctx context.Context) {
|
||||
authDaemonEnabled = v
|
||||
}
|
||||
}
|
||||
if authDaemonGenerateRandomPasswordEnv == "" {
|
||||
flag.BoolVar(&authDaemonGenerateRandomPassword, "ad-generate-random-password", false, "Generate a random password for authenticated users")
|
||||
} else {
|
||||
if v, err := strconv.ParseBool(authDaemonGenerateRandomPasswordEnv); err == nil {
|
||||
authDaemonGenerateRandomPassword = v
|
||||
}
|
||||
}
|
||||
|
||||
// do a --version check
|
||||
version := flag.Bool("version", false, "Print the version")
|
||||
@@ -432,15 +431,6 @@ func runNewtMain(ctx context.Context) {
|
||||
tlsClientCAs = append(tlsClientCAs, tlsClientCAsFlag...)
|
||||
}
|
||||
|
||||
if portStr != "" {
|
||||
portInt, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
logger.Warn("Failed to parse PORT, choosing a random port")
|
||||
} else {
|
||||
port = uint16(portInt)
|
||||
}
|
||||
}
|
||||
|
||||
if *version {
|
||||
fmt.Println("Newt version " + newtVersion)
|
||||
os.Exit(0)
|
||||
@@ -565,7 +555,7 @@ func runNewtMain(ctx context.Context) {
|
||||
id, // CLI arg takes precedence
|
||||
secret, // CLI arg takes precedence
|
||||
endpoint,
|
||||
30*time.Second,
|
||||
pingInterval,
|
||||
pingTimeout,
|
||||
opt,
|
||||
)
|
||||
@@ -618,8 +608,6 @@ func runNewtMain(ctx context.Context) {
|
||||
var connected bool
|
||||
var wgData WgData
|
||||
var dockerEventMonitor *docker.EventMonitor
|
||||
|
||||
logger.Debug("++++++++++++++++++++++ the port is %d", port)
|
||||
|
||||
if !disableClients {
|
||||
setupClients(client)
|
||||
@@ -959,7 +947,7 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
"publicKey": publicKey.String(),
|
||||
"pingResults": pingResults,
|
||||
"newtVersion": newtVersion,
|
||||
}, 2*time.Second)
|
||||
}, 1*time.Second)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -1062,7 +1050,7 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
"publicKey": publicKey.String(),
|
||||
"pingResults": pingResults,
|
||||
"newtVersion": newtVersion,
|
||||
}, 2*time.Second)
|
||||
}, 1*time.Second)
|
||||
|
||||
logger.Debug("Sent exit node ping results to cloud for selection: pingResults=%+v", pingResults)
|
||||
})
|
||||
@@ -1167,153 +1155,6 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
}
|
||||
})
|
||||
|
||||
// Register handler for syncing targets (TCP, UDP, and health checks)
|
||||
client.RegisterHandler("newt/sync", func(msg websocket.WSMessage) {
|
||||
logger.Info("Received sync message")
|
||||
|
||||
// if there is no wgData or pm, we can't sync targets
|
||||
if wgData.TunnelIP == "" || pm == nil {
|
||||
logger.Info(msgNoTunnelOrProxy)
|
||||
return
|
||||
}
|
||||
|
||||
// Define the sync data structure
|
||||
type SyncData struct {
|
||||
Targets TargetsByType `json:"targets"`
|
||||
HealthCheckTargets []healthcheck.Config `json:"healthCheckTargets"`
|
||||
}
|
||||
|
||||
var syncData SyncData
|
||||
jsonData, err := json.Marshal(msg.Data)
|
||||
if err != nil {
|
||||
logger.Error("Error marshaling sync data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(jsonData, &syncData); err != nil {
|
||||
logger.Error("Error unmarshaling sync data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("Sync data received: TCP targets=%d, UDP targets=%d, health check targets=%d",
|
||||
len(syncData.Targets.TCP), len(syncData.Targets.UDP), len(syncData.HealthCheckTargets))
|
||||
|
||||
//TODO: TEST AND IMPLEMENT THIS
|
||||
|
||||
// // Build sets of desired targets (port -> target string)
|
||||
// desiredTCP := make(map[int]string)
|
||||
// for _, t := range syncData.Targets.TCP {
|
||||
// parts := strings.Split(t, ":")
|
||||
// if len(parts) != 3 {
|
||||
// logger.Warn("Invalid TCP target format: %s", t)
|
||||
// continue
|
||||
// }
|
||||
// port := 0
|
||||
// if _, err := fmt.Sscanf(parts[0], "%d", &port); err != nil {
|
||||
// logger.Warn("Invalid port in TCP target: %s", parts[0])
|
||||
// continue
|
||||
// }
|
||||
// desiredTCP[port] = parts[1] + ":" + parts[2]
|
||||
// }
|
||||
|
||||
// desiredUDP := make(map[int]string)
|
||||
// for _, t := range syncData.Targets.UDP {
|
||||
// parts := strings.Split(t, ":")
|
||||
// if len(parts) != 3 {
|
||||
// logger.Warn("Invalid UDP target format: %s", t)
|
||||
// continue
|
||||
// }
|
||||
// port := 0
|
||||
// if _, err := fmt.Sscanf(parts[0], "%d", &port); err != nil {
|
||||
// logger.Warn("Invalid port in UDP target: %s", parts[0])
|
||||
// continue
|
||||
// }
|
||||
// desiredUDP[port] = parts[1] + ":" + parts[2]
|
||||
// }
|
||||
|
||||
// // Get current targets from proxy manager
|
||||
// currentTCP, currentUDP := pm.GetTargets()
|
||||
|
||||
// // Sync TCP targets
|
||||
// // Remove TCP targets not in desired set
|
||||
// if tcpForIP, ok := currentTCP[wgData.TunnelIP]; ok {
|
||||
// for port := range tcpForIP {
|
||||
// if _, exists := desiredTCP[port]; !exists {
|
||||
// logger.Info("Sync: removing TCP target on port %d", port)
|
||||
// targetStr := fmt.Sprintf("%d:%s", port, tcpForIP[port])
|
||||
// updateTargets(pm, "remove", wgData.TunnelIP, "tcp", TargetData{Targets: []string{targetStr}})
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Add TCP targets that are missing
|
||||
// for port, target := range desiredTCP {
|
||||
// needsAdd := true
|
||||
// if tcpForIP, ok := currentTCP[wgData.TunnelIP]; ok {
|
||||
// if currentTarget, exists := tcpForIP[port]; exists {
|
||||
// // Check if target address changed
|
||||
// if currentTarget == target {
|
||||
// needsAdd = false
|
||||
// } else {
|
||||
// // Target changed, remove old one first
|
||||
// logger.Info("Sync: updating TCP target on port %d", port)
|
||||
// targetStr := fmt.Sprintf("%d:%s", port, currentTarget)
|
||||
// updateTargets(pm, "remove", wgData.TunnelIP, "tcp", TargetData{Targets: []string{targetStr}})
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// if needsAdd {
|
||||
// logger.Info("Sync: adding TCP target on port %d -> %s", port, target)
|
||||
// targetStr := fmt.Sprintf("%d:%s", port, target)
|
||||
// updateTargets(pm, "add", wgData.TunnelIP, "tcp", TargetData{Targets: []string{targetStr}})
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Sync UDP targets
|
||||
// // Remove UDP targets not in desired set
|
||||
// if udpForIP, ok := currentUDP[wgData.TunnelIP]; ok {
|
||||
// for port := range udpForIP {
|
||||
// if _, exists := desiredUDP[port]; !exists {
|
||||
// logger.Info("Sync: removing UDP target on port %d", port)
|
||||
// targetStr := fmt.Sprintf("%d:%s", port, udpForIP[port])
|
||||
// updateTargets(pm, "remove", wgData.TunnelIP, "udp", TargetData{Targets: []string{targetStr}})
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Add UDP targets that are missing
|
||||
// for port, target := range desiredUDP {
|
||||
// needsAdd := true
|
||||
// if udpForIP, ok := currentUDP[wgData.TunnelIP]; ok {
|
||||
// if currentTarget, exists := udpForIP[port]; exists {
|
||||
// // Check if target address changed
|
||||
// if currentTarget == target {
|
||||
// needsAdd = false
|
||||
// } else {
|
||||
// // Target changed, remove old one first
|
||||
// logger.Info("Sync: updating UDP target on port %d", port)
|
||||
// targetStr := fmt.Sprintf("%d:%s", port, currentTarget)
|
||||
// updateTargets(pm, "remove", wgData.TunnelIP, "udp", TargetData{Targets: []string{targetStr}})
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// if needsAdd {
|
||||
// logger.Info("Sync: adding UDP target on port %d -> %s", port, target)
|
||||
// targetStr := fmt.Sprintf("%d:%s", port, target)
|
||||
// updateTargets(pm, "add", wgData.TunnelIP, "udp", TargetData{Targets: []string{targetStr}})
|
||||
// }
|
||||
// }
|
||||
|
||||
// // Sync health check targets
|
||||
// if err := healthMonitor.SyncTargets(syncData.HealthCheckTargets); err != nil {
|
||||
// logger.Error("Failed to sync health check targets: %v", err)
|
||||
// } else {
|
||||
// logger.Info("Successfully synced health check targets")
|
||||
// }
|
||||
|
||||
logger.Info("Sync complete")
|
||||
})
|
||||
|
||||
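The commented-out sync logic above assumes each TCP/UDP target string is encoded as listenPort:targetHost:targetPort. A small sketch of that parse, using only the format implied by the commented code; the helper name is illustrative and uses the standard strings and fmt packages.

// parseTargetString splits "443:10.0.0.5:8443" into the local listen port and the
// forward address. Illustrative only; the handler currently keeps this logic commented out.
func parseTargetString(t string) (port int, addr string, err error) {
	parts := strings.Split(t, ":")
	if len(parts) != 3 {
		return 0, "", fmt.Errorf("invalid target format: %s", t)
	}
	if _, err := fmt.Sscanf(parts[0], "%d", &port); err != nil {
		return 0, "", fmt.Errorf("invalid port in target: %s", parts[0])
	}
	return port, parts[1] + ":" + parts[2], nil
}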
// Register handler for Docker socket check
|
||||
client.RegisterHandler("newt/socket/check", func(msg websocket.WSMessage) {
|
||||
logger.Debug("Received Docker socket check request")
|
||||
@@ -1537,18 +1378,15 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
|
||||
// Define the structure of the incoming message
|
||||
type SSHCertData struct {
|
||||
MessageId int `json:"messageId"`
|
||||
AgentPort int `json:"agentPort"`
|
||||
AgentHost string `json:"agentHost"`
|
||||
ExternalAuthDaemon bool `json:"externalAuthDaemon"`
|
||||
CACert string `json:"caCert"`
|
||||
Username string `json:"username"`
|
||||
NiceID string `json:"niceId"`
|
||||
Metadata struct {
|
||||
SudoMode string `json:"sudoMode"`
|
||||
SudoCommands []string `json:"sudoCommands"`
|
||||
Homedir bool `json:"homedir"`
|
||||
Groups []string `json:"groups"`
|
||||
MessageId int `json:"messageId"`
|
||||
AgentPort int `json:"agentPort"`
|
||||
AgentHost string `json:"agentHost"`
|
||||
CACert string `json:"caCert"`
|
||||
Username string `json:"username"`
|
||||
NiceID string `json:"niceId"`
|
||||
Metadata struct {
|
||||
Sudo bool `json:"sudo"`
|
||||
Homedir bool `json:"homedir"`
|
||||
} `json:"metadata"`
|
||||
}
|
||||
|
||||
@@ -1568,7 +1406,7 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
}
|
||||
|
||||
// Check if we're running the auth daemon internally
|
||||
if authDaemonServer != nil && !certData.ExternalAuthDaemon { // if the auth daemon is running internally and the external auth daemon is not enabled
|
||||
if authDaemonServer != nil {
|
||||
// Call ProcessConnection directly when running internally
|
||||
logger.Debug("Calling internal auth daemon ProcessConnection for user %s", certData.Username)
|
||||
|
||||
@@ -1577,10 +1415,8 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
NiceId: certData.NiceID,
|
||||
Username: certData.Username,
|
||||
Metadata: authdaemon.ConnectionMetadata{
|
||||
SudoMode: certData.Metadata.SudoMode,
|
||||
SudoCommands: certData.Metadata.SudoCommands,
|
||||
Homedir: certData.Metadata.Homedir,
|
||||
Groups: certData.Metadata.Groups,
|
||||
Sudo: certData.Metadata.Sudo,
|
||||
Homedir: certData.Metadata.Homedir,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -1614,10 +1450,8 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
"niceId": certData.NiceID,
|
||||
"username": certData.Username,
|
||||
"metadata": map[string]interface{}{
|
||||
"sudoMode": certData.Metadata.SudoMode,
|
||||
"sudoCommands": certData.Metadata.SudoCommands,
|
||||
"homedir": certData.Metadata.Homedir,
|
||||
"groups": certData.Metadata.Groups,
|
||||
"sudo": certData.Metadata.Sudo,
|
||||
"homedir": certData.Metadata.Homedir,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -48,21 +48,113 @@ type SubnetRule struct {
|
||||
PortRanges []PortRange // empty slice means all ports allowed
|
||||
}
|
||||
|
||||
// GetAllRules returns a copy of all subnet rules
|
||||
func (sl *SubnetLookup) GetAllRules() []SubnetRule {
|
||||
// ruleKey is used as a map key for fast O(1) lookups
|
||||
type ruleKey struct {
|
||||
sourcePrefix string
|
||||
destPrefix string
|
||||
}
|
||||
|
||||
// SubnetLookup provides fast IP subnet and port matching with O(1) lookup performance
|
||||
type SubnetLookup struct {
|
||||
mu sync.RWMutex
|
||||
rules map[ruleKey]*SubnetRule // Map for O(1) lookups by prefix combination
|
||||
}
|
||||
|
||||
// NewSubnetLookup creates a new subnet lookup table
|
||||
func NewSubnetLookup() *SubnetLookup {
|
||||
return &SubnetLookup{
|
||||
rules: make(map[ruleKey]*SubnetRule),
|
||||
}
|
||||
}
|
||||
|
||||
// AddSubnet adds a subnet rule with source and destination prefixes and optional port restrictions
|
||||
// If portRanges is nil or empty, all ports are allowed for this subnet
|
||||
// rewriteTo can be either an IP/CIDR (e.g., "192.168.1.1/32") or a domain name (e.g., "example.com")
|
||||
func (sl *SubnetLookup) AddSubnet(sourcePrefix, destPrefix netip.Prefix, rewriteTo string, portRanges []PortRange, disableIcmp bool) {
|
||||
sl.mu.Lock()
|
||||
defer sl.mu.Unlock()
|
||||
|
||||
key := ruleKey{
|
||||
sourcePrefix: sourcePrefix.String(),
|
||||
destPrefix: destPrefix.String(),
|
||||
}
|
||||
|
||||
sl.rules[key] = &SubnetRule{
|
||||
SourcePrefix: sourcePrefix,
|
||||
DestPrefix: destPrefix,
|
||||
DisableIcmp: disableIcmp,
|
||||
RewriteTo: rewriteTo,
|
||||
PortRanges: portRanges,
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveSubnet removes a subnet rule from the lookup table
|
||||
func (sl *SubnetLookup) RemoveSubnet(sourcePrefix, destPrefix netip.Prefix) {
|
||||
sl.mu.Lock()
|
||||
defer sl.mu.Unlock()
|
||||
|
||||
key := ruleKey{
|
||||
sourcePrefix: sourcePrefix.String(),
|
||||
destPrefix: destPrefix.String(),
|
||||
}
|
||||
|
||||
delete(sl.rules, key)
|
||||
}
|
||||
|
||||
// Match checks if a source IP, destination IP, port, and protocol match any subnet rule
|
||||
// Returns the matched rule if ALL of these conditions are met:
|
||||
// - The source IP is in the rule's source prefix
|
||||
// - The destination IP is in the rule's destination prefix
|
||||
// - The port is in an allowed range (or no port restrictions exist)
|
||||
// - The protocol matches (or the port range allows both protocols)
|
||||
//
|
||||
// proto should be header.TCPProtocolNumber or header.UDPProtocolNumber
|
||||
// Returns nil if no rule matches
|
||||
func (sl *SubnetLookup) Match(srcIP, dstIP netip.Addr, port uint16, proto tcpip.TransportProtocolNumber) *SubnetRule {
|
||||
sl.mu.RLock()
|
||||
defer sl.mu.RUnlock()
|
||||
|
||||
var rules []SubnetRule
|
||||
for _, destTriePtr := range sl.sourceTrie.All() {
|
||||
if destTriePtr == nil {
|
||||
// Iterate through all rules to find matching source and destination prefixes
|
||||
// This is O(n) but necessary since we need to check prefix containment, not exact match
|
||||
for _, rule := range sl.rules {
|
||||
// Check if source and destination IPs match their respective prefixes
|
||||
if !rule.SourcePrefix.Contains(srcIP) {
|
||||
continue
|
||||
}
|
||||
for _, rule := range destTriePtr.rules {
|
||||
rules = append(rules, *rule)
|
||||
if !rule.DestPrefix.Contains(dstIP) {
|
||||
continue
|
||||
}
|
||||
|
||||
if rule.DisableIcmp && (proto == header.ICMPv4ProtocolNumber || proto == header.ICMPv6ProtocolNumber) {
|
||||
// ICMP is disabled for this subnet
|
||||
return nil
|
||||
}
|
||||
|
||||
// Both IPs match - now check port restrictions
|
||||
// If no port ranges specified, all ports are allowed
|
||||
if len(rule.PortRanges) == 0 {
|
||||
return rule
|
||||
}
|
||||
|
||||
// Check if port and protocol are in any of the allowed ranges
|
||||
for _, pr := range rule.PortRanges {
|
||||
if port >= pr.Min && port <= pr.Max {
|
||||
// Check protocol compatibility
|
||||
if pr.Protocol == "" {
|
||||
// Empty protocol means allow both TCP and UDP
|
||||
return rule
|
||||
}
|
||||
// Check if the packet protocol matches the port range protocol
|
||||
if (pr.Protocol == "tcp" && proto == header.TCPProtocolNumber) ||
|
||||
(pr.Protocol == "udp" && proto == header.UDPProtocolNumber) {
|
||||
return rule
|
||||
}
|
||||
// Port matches but protocol doesn't - continue checking other ranges
|
||||
}
|
||||
}
|
||||
}
|
||||
return rules
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
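Taken together, the map-based replacement keeps the same public surface as the old lookup. A short usage sketch built only from the signatures shown above, written as if inside the netstack2 package; the prefix and address values are made up for illustration.

// Sketch: register a rule and match a flow against it.
sl := NewSubnetLookup()
src := netip.MustParsePrefix("10.0.0.0/24")
dst := netip.MustParsePrefix("192.168.1.0/24")
sl.AddSubnet(src, dst, "192.168.1.10/32",
	[]PortRange{{Min: 443, Max: 443, Protocol: "tcp"}}, false)

if rule := sl.Match(netip.MustParseAddr("10.0.0.5"), netip.MustParseAddr("192.168.1.20"),
	443, header.TCPProtocolNumber); rule != nil {
	// rule.RewriteTo tells the proxy where this flow should actually be sent.
}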
// connKey uniquely identifies a connection for NAT tracking
|
||||
@@ -74,17 +166,6 @@ type connKey struct {
|
||||
proto uint8
|
||||
}
|
||||
|
||||
// reverseConnKey uniquely identifies a connection for reverse NAT lookup (reply direction)
|
||||
// Key structure: (rewrittenTo, originalSrcIP, originalSrcPort, originalDstPort, proto)
|
||||
// This allows O(1) lookup of NAT entries for reply packets
|
||||
type reverseConnKey struct {
|
||||
rewrittenTo string // The address we rewrote to (becomes src in replies)
|
||||
originalSrcIP string // Original source IP (becomes dst in replies)
|
||||
originalSrcPort uint16 // Original source port (becomes dst port in replies)
|
||||
originalDstPort uint16 // Original destination port (becomes src port in replies)
|
||||
proto uint8
|
||||
}
|
||||
|
||||
// destKey identifies a destination for handler lookups (without source port since it may change)
|
||||
type destKey struct {
|
||||
srcIP string
|
||||
@@ -109,8 +190,7 @@ type ProxyHandler struct {
|
||||
icmpHandler *ICMPHandler
|
||||
subnetLookup *SubnetLookup
|
||||
natTable map[connKey]*natState
|
||||
reverseNatTable map[reverseConnKey]*natState // Reverse lookup map for O(1) reply packet NAT
|
||||
destRewriteTable map[destKey]netip.Addr // Maps original dest to rewritten dest for handler lookups
|
||||
destRewriteTable map[destKey]netip.Addr // Maps original dest to rewritten dest for handler lookups
|
||||
natMu sync.RWMutex
|
||||
enabled bool
|
||||
icmpReplies chan []byte // Channel for ICMP reply packets to be sent back through the tunnel
|
||||
@@ -135,7 +215,6 @@ func NewProxyHandler(options ProxyHandlerOptions) (*ProxyHandler, error) {
|
||||
enabled: true,
|
||||
subnetLookup: NewSubnetLookup(),
|
||||
natTable: make(map[connKey]*natState),
|
||||
reverseNatTable: make(map[reverseConnKey]*natState),
|
||||
destRewriteTable: make(map[destKey]netip.Addr),
|
||||
icmpReplies: make(chan []byte, 256), // Buffer for ICMP reply packets
|
||||
proxyEp: channel.New(1024, uint32(options.MTU), ""),
|
||||
@@ -217,14 +296,6 @@ func (p *ProxyHandler) RemoveSubnetRule(sourcePrefix, destPrefix netip.Prefix) {
|
||||
p.subnetLookup.RemoveSubnet(sourcePrefix, destPrefix)
|
||||
}
|
||||
|
||||
// GetAllRules returns all subnet rules from the proxy handler
|
||||
func (p *ProxyHandler) GetAllRules() []SubnetRule {
|
||||
if p == nil || !p.enabled {
|
||||
return nil
|
||||
}
|
||||
return p.subnetLookup.GetAllRules()
|
||||
}
|
||||
|
||||
// LookupDestinationRewrite looks up the rewritten destination for a connection
|
||||
// This is used by TCP/UDP handlers to find the actual target address
|
||||
func (p *ProxyHandler) LookupDestinationRewrite(srcIP, dstIP string, dstPort uint16, proto uint8) (netip.Addr, bool) {
|
||||
@@ -446,23 +517,10 @@ func (p *ProxyHandler) HandleIncomingPacket(packet []byte) bool {
|
||||
|
||||
// Store NAT state for this connection
|
||||
p.natMu.Lock()
|
||||
natEntry := &natState{
|
||||
p.natTable[key] = &natState{
|
||||
originalDst: dstAddr,
|
||||
rewrittenTo: newDst,
|
||||
}
|
||||
p.natTable[key] = natEntry
|
||||
|
||||
// Create reverse lookup key for O(1) reply packet lookups
|
||||
// Key: (rewrittenTo, originalSrcIP, originalSrcPort, originalDstPort, proto)
|
||||
reverseKey := reverseConnKey{
|
||||
rewrittenTo: newDst.String(),
|
||||
originalSrcIP: srcAddr.String(),
|
||||
originalSrcPort: srcPort,
|
||||
originalDstPort: dstPort,
|
||||
proto: uint8(protocol),
|
||||
}
|
||||
p.reverseNatTable[reverseKey] = natEntry
|
||||
|
||||
// Store destination rewrite for handler lookups
|
||||
p.destRewriteTable[dKey] = newDst
|
||||
p.natMu.Unlock()
|
||||
@@ -661,22 +719,20 @@ func (p *ProxyHandler) ReadOutgoingPacket() *buffer.View {
|
||||
return view
|
||||
}
|
||||
|
||||
// Look up NAT state for reverse translation using O(1) reverse lookup map
|
||||
// Key: (rewrittenTo, originalSrcIP, originalSrcPort, originalDstPort, proto)
|
||||
// For reply packets:
|
||||
// - reply's srcIP = rewrittenTo (the address we rewrote to)
|
||||
// - reply's dstIP = originalSrcIP (original source IP)
|
||||
// - reply's srcPort = originalDstPort (original destination port)
|
||||
// - reply's dstPort = originalSrcPort (original source port)
|
||||
// Look up NAT state for reverse translation
|
||||
// The key uses the original dst (before rewrite), so for replies we need to
|
||||
// find the entry where the rewritten address matches the current source
|
||||
p.natMu.RLock()
|
||||
reverseKey := reverseConnKey{
|
||||
rewrittenTo: srcIP.String(), // Reply's source is the rewritten address
|
||||
originalSrcIP: dstIP.String(), // Reply's destination is the original source
|
||||
originalSrcPort: dstPort, // Reply's destination port is the original source port
|
||||
originalDstPort: srcPort, // Reply's source port is the original destination port
|
||||
proto: uint8(protocol),
|
||||
var natEntry *natState
|
||||
for k, entry := range p.natTable {
|
||||
// Match: reply's dst should be original src, reply's src should be rewritten dst
|
||||
if k.srcIP == dstIP.String() && k.srcPort == dstPort &&
|
||||
entry.rewrittenTo.String() == srcIP.String() && k.dstPort == srcPort &&
|
||||
k.proto == uint8(protocol) {
|
||||
natEntry = entry
|
||||
break
|
||||
}
|
||||
}
|
||||
natEntry := p.reverseNatTable[reverseKey]
|
||||
p.natMu.RUnlock()
|
||||
|
||||
if natEntry != nil {
|
||||
|
||||
@@ -1,206 +0,0 @@
|
||||
package netstack2
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"sync"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
)
|
||||
|
||||
// SubnetLookup provides fast IP subnet and port matching using BART (Binary Aggregated Range Tree)
|
||||
// This uses BART Table for O(log n) prefix matching with Supernets() for efficient lookups
|
||||
//
|
||||
// Architecture:
|
||||
// - Two-level BART structure for matching both source AND destination prefixes
|
||||
// - Level 1: Source prefix -> Level 2 (destination prefix -> rules)
|
||||
// - This reduces search space: only check destination prefixes for matching source prefixes
|
||||
type SubnetLookup struct {
|
||||
mu sync.RWMutex
|
||||
// Two-level BART structure:
|
||||
// Level 1: Source prefix -> Level 2 (destination prefix -> rules)
|
||||
// This allows us to first match source prefix, then only check destination prefixes
|
||||
// for matching source prefixes, reducing the search space significantly
|
||||
sourceTrie *bart.Table[*destTrie]
|
||||
}
|
||||
|
||||
// destTrie is a BART for destination prefixes, containing the actual rules
|
||||
type destTrie struct {
|
||||
trie *bart.Table[[]*SubnetRule]
|
||||
rules []*SubnetRule // All rules for this source prefix (for iteration if needed)
|
||||
}
|
||||
|
||||
// NewSubnetLookup creates a new subnet lookup table using BART
|
||||
func NewSubnetLookup() *SubnetLookup {
|
||||
return &SubnetLookup{
|
||||
sourceTrie: &bart.Table[*destTrie]{},
|
||||
}
|
||||
}
|
||||
|
||||
// prefixEqual compares two prefixes after masking to handle host bits correctly.
|
||||
// For example, 10.0.0.5/24 and 10.0.0.0/24 are treated as equal.
|
||||
func prefixEqual(a, b netip.Prefix) bool {
|
||||
return a.Masked() == b.Masked()
|
||||
}
|
||||
|
||||
// AddSubnet adds a subnet rule with source and destination prefixes and optional port restrictions
|
||||
// If portRanges is nil or empty, all ports are allowed for this subnet
|
||||
// rewriteTo can be either an IP/CIDR (e.g., "192.168.1.1/32") or a domain name (e.g., "example.com")
|
||||
func (sl *SubnetLookup) AddSubnet(sourcePrefix, destPrefix netip.Prefix, rewriteTo string, portRanges []PortRange, disableIcmp bool) {
|
||||
sl.mu.Lock()
|
||||
defer sl.mu.Unlock()
|
||||
|
||||
rule := &SubnetRule{
|
||||
SourcePrefix: sourcePrefix,
|
||||
DestPrefix: destPrefix,
|
||||
DisableIcmp: disableIcmp,
|
||||
RewriteTo: rewriteTo,
|
||||
PortRanges: portRanges,
|
||||
}
|
||||
|
||||
// Canonicalize source prefix to handle host bits correctly
|
||||
canonicalSourcePrefix := sourcePrefix.Masked()
|
||||
|
||||
// Get or create destination trie for this source prefix
|
||||
destTriePtr, exists := sl.sourceTrie.Get(canonicalSourcePrefix)
|
||||
if !exists {
|
||||
// Create new destination trie for this source prefix
|
||||
destTriePtr = &destTrie{
|
||||
trie: &bart.Table[[]*SubnetRule]{},
|
||||
rules: make([]*SubnetRule, 0),
|
||||
}
|
||||
sl.sourceTrie.Insert(canonicalSourcePrefix, destTriePtr)
|
||||
}
|
||||
|
||||
// Canonicalize destination prefix to handle host bits correctly
|
||||
// BART masks prefixes internally, so we need to match that behavior in our bookkeeping
|
||||
canonicalDestPrefix := destPrefix.Masked()
|
||||
|
||||
// Add rule to destination trie
|
||||
// Original behavior: overwrite if same (sourcePrefix, destPrefix) exists
|
||||
// Store as single-element slice to match original overwrite behavior
|
||||
destTriePtr.trie.Insert(canonicalDestPrefix, []*SubnetRule{rule})
|
||||
|
||||
// Update destTriePtr.rules - remove old rule with same canonical prefix if exists, then add new one
|
||||
// Use canonical comparison to handle cases like 10.0.0.5/24 vs 10.0.0.0/24
|
||||
newRules := make([]*SubnetRule, 0, len(destTriePtr.rules)+1)
|
||||
for _, r := range destTriePtr.rules {
|
||||
if !prefixEqual(r.DestPrefix, canonicalDestPrefix) || !prefixEqual(r.SourcePrefix, canonicalSourcePrefix) {
|
||||
newRules = append(newRules, r)
|
||||
}
|
||||
}
|
||||
newRules = append(newRules, rule)
|
||||
destTriePtr.rules = newRules
|
||||
}
|
||||
|
||||
// RemoveSubnet removes a subnet rule from the lookup table
|
||||
func (sl *SubnetLookup) RemoveSubnet(sourcePrefix, destPrefix netip.Prefix) {
|
||||
sl.mu.Lock()
|
||||
defer sl.mu.Unlock()
|
||||
|
||||
// Canonicalize prefixes to handle host bits correctly
|
||||
canonicalSourcePrefix := sourcePrefix.Masked()
|
||||
canonicalDestPrefix := destPrefix.Masked()
|
||||
|
||||
destTriePtr, exists := sl.sourceTrie.Get(canonicalSourcePrefix)
|
||||
if !exists {
|
||||
return
|
||||
}
|
||||
|
||||
// Remove the rule - original behavior: delete exact (sourcePrefix, destPrefix) combination
|
||||
// BART masks prefixes internally, so Delete works with canonical form
|
||||
destTriePtr.trie.Delete(canonicalDestPrefix)
|
||||
|
||||
// Also remove from destTriePtr.rules using canonical comparison
|
||||
// This ensures we remove rules even if they were added with host bits set
|
||||
newDestRules := make([]*SubnetRule, 0, len(destTriePtr.rules))
|
||||
for _, r := range destTriePtr.rules {
|
||||
if !prefixEqual(r.DestPrefix, canonicalDestPrefix) || !prefixEqual(r.SourcePrefix, canonicalSourcePrefix) {
|
||||
newDestRules = append(newDestRules, r)
|
||||
}
|
||||
}
|
||||
destTriePtr.rules = newDestRules
|
||||
|
||||
// Check if the trie is actually empty using BART's Size() method
|
||||
// This is more efficient than iterating and ensures we clean up empty tries
|
||||
// even if there were stale entries in the rules slice (which shouldn't happen
|
||||
// with proper canonicalization, but this provides a definitive check)
|
||||
if destTriePtr.trie.Size() == 0 {
|
||||
sl.sourceTrie.Delete(canonicalSourcePrefix)
|
||||
}
|
||||
}
|
||||
|
||||
// Match checks if a source IP, destination IP, port, and protocol match any subnet rule
|
||||
// Returns the matched rule if ALL of these conditions are met:
|
||||
// - The source IP is in the rule's source prefix
|
||||
// - The destination IP is in the rule's destination prefix
|
||||
// - The port is in an allowed range (or no port restrictions exist)
|
||||
// - The protocol matches (or the port range allows both protocols)
|
||||
//
|
||||
// proto should be header.TCPProtocolNumber, header.UDPProtocolNumber, or header.ICMPv4ProtocolNumber
|
||||
// Returns nil if no rule matches
|
||||
// This uses BART's Supernets() for O(log n) prefix matching instead of O(n) iteration
|
||||
func (sl *SubnetLookup) Match(srcIP, dstIP netip.Addr, port uint16, proto tcpip.TransportProtocolNumber) *SubnetRule {
|
||||
sl.mu.RLock()
|
||||
defer sl.mu.RUnlock()
|
||||
|
||||
// Convert IP addresses to /32 (IPv4) or /128 (IPv6) prefixes
|
||||
// Supernets() finds all prefixes that contain this IP (i.e., are supernets of /32 or /128)
|
||||
srcPrefix := netip.PrefixFrom(srcIP, srcIP.BitLen())
|
||||
dstPrefix := netip.PrefixFrom(dstIP, dstIP.BitLen())
|
||||
|
||||
// Step 1: Find all source prefixes that contain srcIP using BART's Supernets
|
||||
// This is O(log n) instead of O(n) iteration
|
||||
// Supernets returns all prefixes that are supernets (contain) the given prefix
|
||||
for _, destTriePtr := range sl.sourceTrie.Supernets(srcPrefix) {
|
||||
if destTriePtr == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 2: Find all destination prefixes that contain dstIP
|
||||
// This is also O(log n) for each matching source prefix
|
||||
for _, rules := range destTriePtr.trie.Supernets(dstPrefix) {
|
||||
if rules == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 3: Check each rule for ICMP and port restrictions
|
||||
for _, rule := range rules {
|
||||
// Handle ICMP before port range check — ICMP has no ports
|
||||
if proto == header.ICMPv4ProtocolNumber || proto == header.ICMPv6ProtocolNumber {
|
||||
if rule.DisableIcmp {
|
||||
return nil
|
||||
}
|
||||
// ICMP is allowed; port ranges don't apply to ICMP
|
||||
return rule
|
||||
}
|
||||
|
||||
// Check port restrictions
|
||||
if len(rule.PortRanges) == 0 {
|
||||
// No port restrictions, match!
|
||||
return rule
|
||||
}
|
||||
|
||||
// Check if port and protocol are in any of the allowed ranges
|
||||
for _, pr := range rule.PortRanges {
|
||||
if port >= pr.Min && port <= pr.Max {
|
||||
// Check protocol compatibility
|
||||
if pr.Protocol == "" {
|
||||
// Empty protocol means allow both TCP and UDP
|
||||
return rule
|
||||
}
|
||||
// Check if the packet protocol matches the port range protocol
|
||||
if (pr.Protocol == "tcp" && proto == header.TCPProtocolNumber) ||
|
||||
(pr.Protocol == "udp" && proto == header.UDPProtocolNumber) {
|
||||
return rule
|
||||
}
|
||||
// Port matches but protocol doesn't - continue checking other ranges
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -369,15 +369,6 @@ func (net *Net) RemoveProxySubnetRule(sourcePrefix, destPrefix netip.Prefix) {
	}
}

// GetProxySubnetRules returns all subnet rules from the proxy handler
func (net *Net) GetProxySubnetRules() []SubnetRule {
	tun := (*netTun)(net)
	if tun.proxyHandler != nil {
		return tun.proxyHandler.GetAllRules()
	}
	return nil
}

// GetProxyHandler returns the proxy handler (for advanced use cases)
// Returns nil if proxy is not enabled
func (net *Net) GetProxyHandler() *ProxyHandler {
2 newt.iss
@@ -32,7 +32,7 @@ DefaultGroupName={#MyAppName}
DisableProgramGroupPage=yes
; Uncomment the following line to run in non administrative install mode (install for current user only).
;PrivilegesRequired=lowest
OutputBaseFilename=newt_windows_installer
OutputBaseFilename=mysetup
SolidCompression=yes
WizardStyle=modern
; Add this to ensure PATH changes are applied and the system is prompted for a restart if needed
@@ -736,28 +736,3 @@ func (pm *ProxyManager) PrintTargets() {
		}
	}
}

// GetTargets returns a copy of the current TCP and UDP targets
// Returns map[listenIP]map[port]targetAddress for both TCP and UDP
func (pm *ProxyManager) GetTargets() (tcpTargets map[string]map[int]string, udpTargets map[string]map[int]string) {
	pm.mutex.RLock()
	defer pm.mutex.RUnlock()

	tcpTargets = make(map[string]map[int]string)
	for listenIP, targets := range pm.tcpTargets {
		tcpTargets[listenIP] = make(map[int]string)
		for port, targetAddr := range targets {
			tcpTargets[listenIP][port] = targetAddr
		}
	}

	udpTargets = make(map[string]map[int]string)
	for listenIP, targets := range pm.udpTargets {
		udpTargets[listenIP] = make(map[int]string)
		for port, targetAddr := range targets {
			udpTargets[listenIP][port] = targetAddr
		}
	}

	return tcpTargets, udpTargets
}
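For reference, a minimal sketch of how the nested maps documented by GetTargets could be walked; this hunk appears to remove the function, so the sketch is purely illustrative, and the package name and helper function are assumptions.

package proxy // hypothetical package name; the excerpt does not show it

import "fmt"

// dumpTargets is an illustrative helper: both return values are keyed by
// listen IP, then listen port, with the forward target address as the value.
func dumpTargets(pm *ProxyManager) {
	tcpTargets, udpTargets := pm.GetTargets()

	for listenIP, ports := range tcpTargets {
		for port, target := range ports {
			fmt.Printf("tcp %s:%d -> %s\n", listenIP, port, target)
		}
	}
	for listenIP, ports := range udpTargets {
		for port, target := range ports {
			fmt.Printf("udp %s:%d -> %s\n", listenIP, port, target)
		}
	}
}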
22 scripts/append-release-notes.sh Normal file
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -euo pipefail

: "${TAG:?}"
: "${GHCR_REF:?}"
: "${DIGEST:?}"

NOTES_FILE="$(mktemp)"

existing_body="$(gh release view "${TAG}" --json body --jq '.body')"
cat > "${NOTES_FILE}" <<EOF
${existing_body}

## Container Images
- GHCR: \`${GHCR_REF}\`
- Docker Hub: \`${DH_REF:-N/A}\`
**Digest:** \`${DIGEST}\`
EOF

gh release edit "${TAG}" --draft --notes-file "${NOTES_FILE}"

rm -f "${NOTES_FILE}"
11 scripts/nfpm.yaml.tmpl Normal file
@@ -0,0 +1,11 @@
name: __PKG_NAME__
arch: __ARCH__
platform: linux
version: __VERSION__
section: net
priority: optional
maintainer: fosrl
description: Newt - userspace tunnel client and TCP/UDP proxy
contents:
  - src: build/newt
    dst: /usr/bin/newt
149 scripts/publish-apt.sh Normal file
@@ -0,0 +1,149 @@
#!/usr/bin/env bash
set -euo pipefail

# ---- required env ----
: "${GH_REPO:?}"
: "${S3_BUCKET:?}"
: "${AWS_REGION:?}"
: "${CLOUDFRONT_DISTRIBUTION_ID:?}"
: "${PKG_NAME:?}"
: "${SUITE:?}"
: "${COMPONENT:?}"
: "${APT_GPG_PRIVATE_KEY:?}"

S3_PREFIX="${S3_PREFIX:-}"
if [[ -n "${S3_PREFIX}" && "${S3_PREFIX}" != */ ]]; then
  S3_PREFIX="${S3_PREFIX}/"
fi

WORKDIR="$(pwd)"
mkdir -p repo/apt assets build

download_asset() {
  local tag="$1"
  local pattern="$2"
  local attempts=12

  for attempt in $(seq 1 "${attempts}"); do
    if gh release download "${tag}" -R "${GH_REPO}" -p "${pattern}" -D assets; then
      return 0
    fi
    echo "Asset ${pattern} not available yet (attempt ${attempt}/${attempts}); retrying..."
    sleep 5
  done

  echo "ERROR: Failed to download asset ${pattern} for ${tag} after ${attempts} attempts"
  return 1
}

echo "${APT_GPG_PRIVATE_KEY}" | gpg --batch --import >/dev/null 2>&1 || true

KEYID="$(gpg --list-secret-keys --with-colons | awk -F: '$1=="sec"{print $5; exit}')"
if [[ -z "${KEYID}" ]]; then
  echo "ERROR: No GPG secret key available after import."
  exit 1
fi

# Determine which tags to process
TAGS=""
if [[ "${BACKFILL_ALL:-false}" == "true" ]]; then
  echo "Backfill mode: collecting all release tags..."
  TAGS="$(gh release list -R "${GH_REPO}" --limit 200 --json tagName --jq '.[].tagName')"
else
  if [[ -n "${INPUT_TAG:-}" ]]; then
    TAGS="${INPUT_TAG}"
  elif [[ -n "${EVENT_TAG:-}" ]]; then
    TAGS="${EVENT_TAG}"
  elif [[ -n "${PUSH_TAG:-}" ]]; then
    TAGS="${PUSH_TAG}"
  else
    echo "No tag provided; using latest release tag..."
    TAGS="$(gh release view -R "${GH_REPO}" --json tagName --jq '.tagName')"
  fi
fi

echo "Tags to process:"
printf '%s\n' "${TAGS}"

# Pull existing repo from S3 so we keep older versions
echo "Sync existing repo from S3..."
aws s3 sync "s3://${S3_BUCKET}/${S3_PREFIX}apt/" repo/apt/ >/dev/null 2>&1 || true

# Build and add packages
while IFS= read -r TAG; do
  [[ -z "${TAG}" ]] && continue
  echo "=== Processing tag: ${TAG} ==="

  rm -rf assets build
  mkdir -p assets build

  deb_amd64="${PKG_NAME}_${TAG}_amd64.deb"
  deb_arm64="${PKG_NAME}_${TAG}_arm64.deb"

  download_asset "${TAG}" "${deb_amd64}"
  download_asset "${TAG}" "${deb_arm64}"

  if [[ ! -f "assets/${deb_amd64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_amd64}"
    exit 1
  fi
  if [[ ! -f "assets/${deb_arm64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_arm64}"
    exit 1
  fi

  mkdir -p "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
  cp -v assets/*.deb "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"

done <<< "${TAGS}"

# Regenerate metadata
cd repo/apt

for arch in amd64 arm64; do
  mkdir -p "dists/${SUITE}/${COMPONENT}/binary-${arch}"
  dpkg-scanpackages -a "${arch}" pool > "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
  gzip -fk "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
done

# Release file with hashes
cat > apt-ftparchive.conf <<EOF
APT::FTPArchive::Release::Origin "fosrl";
APT::FTPArchive::Release::Label "newt";
APT::FTPArchive::Release::Suite "${SUITE}";
APT::FTPArchive::Release::Codename "${SUITE}";
APT::FTPArchive::Release::Architectures "amd64 arm64";
APT::FTPArchive::Release::Components "${COMPONENT}";
APT::FTPArchive::Release::Description "Newt APT repository";
EOF

apt-ftparchive -c apt-ftparchive.conf release "dists/${SUITE}" > "dists/${SUITE}/Release"

# Sign Release
cd "dists/${SUITE}"

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  --clearsign -o InRelease Release

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  -abs -o Release.gpg Release

# Export public key into apt repo root
cd ../../..
gpg --batch --yes --armor --export "${KEYID}" > "${WORKDIR}/repo/apt/public.key"

# Upload to S3
echo "Uploading to S3..."
aws s3 sync "${WORKDIR}/repo/apt" "s3://${S3_BUCKET}/${S3_PREFIX}apt/" --delete

# Invalidate metadata
echo "CloudFront invalidation..."
aws cloudfront create-invalidation \
  --distribution-id "${CLOUDFRONT_DISTRIBUTION_ID}" \
  --paths "/${S3_PREFIX}apt/dists/*" "/${S3_PREFIX}apt/public.key"

echo "Done. Repo base: ${REPO_BASE_URL}"
@@ -47,11 +47,6 @@ type Client struct {
	metricsCtx context.Context
	configNeedsSave bool // Flag to track if config needs to be saved
	serverVersion string
	configVersion int64 // Latest config version received from server
	configVersionMux sync.RWMutex
	processingMessage bool // Flag to track if a message is currently being processed
	processingMux sync.RWMutex // Protects processingMessage
	processingWg sync.WaitGroup // WaitGroup to wait for message processing to complete
}

type ClientOption func(*Client)
@@ -159,20 +154,6 @@ func (c *Client) GetServerVersion() string {
	return c.serverVersion
}

// GetConfigVersion returns the latest config version received from server
func (c *Client) GetConfigVersion() int64 {
	c.configVersionMux.RLock()
	defer c.configVersionMux.RUnlock()
	return c.configVersion
}

// setConfigVersion updates the config version
func (c *Client) setConfigVersion(version int64) {
	c.configVersionMux.Lock()
	defer c.configVersionMux.Unlock()
	c.configVersion = version
}

// Connect establishes the WebSocket connection
func (c *Client) Connect() error {
	go c.connectWithRetry()
@@ -672,33 +653,12 @@ func (c *Client) pingMonitor() {
		if c.conn == nil {
			return
		}

		// Skip ping if a message is currently being processed
		c.processingMux.RLock()
		isProcessing := c.processingMessage
		c.processingMux.RUnlock()
		if isProcessing {
			logger.Debug("Skipping ping, message is being processed")
			continue
		}

		c.configVersionMux.RLock()
		configVersion := c.configVersion
		c.configVersionMux.RUnlock()

		pingMsg := WSMessage{
			Type:          "newt/ping",
			Data:          map[string]interface{}{},
			ConfigVersion: configVersion,
		}

		c.writeMux.Lock()
		err := c.conn.WriteJSON(pingMsg)
		err := c.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(c.pingTimeout))
		if err == nil {
			telemetry.IncWSMessage(c.metricsContext(), "out", "ping")
		}
		c.writeMux.Unlock()

		if err != nil {
			// Check if we're shutting down before logging error and reconnecting
			select {
@@ -777,24 +737,9 @@ func (c *Client) readPumpWithDisconnectDetection(started time.Time) {
			}
		}

		// Update config version from incoming message
		c.setConfigVersion(msg.ConfigVersion)

		c.handlersMux.RLock()
		if handler, ok := c.handlers[msg.Type]; ok {
			// Mark that we're processing a message
			c.processingMux.Lock()
			c.processingMessage = true
			c.processingMux.Unlock()
			c.processingWg.Add(1)

			handler(msg)

			// Mark that we're done processing
			c.processingWg.Done()
			c.processingMux.Lock()
			c.processingMessage = false
			c.processingMux.Unlock()
		}
		c.handlersMux.RUnlock()
	}
@@ -17,7 +17,6 @@ type TokenResponse struct {
}

type WSMessage struct {
	Type          string      `json:"type"`
	Data          interface{} `json:"data"`
	ConfigVersion int64       `json:"configVersion,omitempty"`
	Type string      `json:"type"`
	Data interface{} `json:"data"`
}
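For context (not part of the diff): a minimal sketch of sending a message with this envelope over a gorilla/websocket connection. The connection setup, message type string, payload, and timeout are assumptions for the example; the diff itself replaces the JSON "newt/ping" keepalive with a WebSocket ping control frame sent via WriteControl, which the sketch mirrors.

package main // self-contained sketch; mirrors the WSMessage shape shown above

import (
	"time"

	"github.com/gorilla/websocket"
)

type WSMessage struct {
	Type string      `json:"type"`
	Data interface{} `json:"data"`
}

// sendExample shows the two write paths: application messages go out as JSON
// text frames, while keepalive uses a ping control frame (as the diff does).
func sendExample(conn *websocket.Conn) error {
	msg := WSMessage{
		Type: "newt/example", // hypothetical message type
		Data: map[string]interface{}{"hello": "world"},
	}
	if err := conn.WriteJSON(msg); err != nil {
		return err
	}
	return conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(10*time.Second))
}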