mirror of
https://github.com/fosrl/newt.git
synced 2026-03-13 02:14:56 -05:00
Compare commits
21 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6618bb4483 | ||
|
|
08952c20c5 | ||
|
|
5e60da37d1 | ||
|
|
53d79aea5a | ||
|
|
0f6852b681 | ||
|
|
2b8e280f2e | ||
|
|
3a377d43de | ||
|
|
792057cf6c | ||
|
|
57afe91e85 | ||
|
|
3389088c43 | ||
|
|
e73150c187 | ||
|
|
18556f34b2 | ||
|
|
66c235624a | ||
|
|
b7af49d759 | ||
|
|
00a5fa1f37 | ||
|
|
d256d6c746 | ||
|
|
2cc957d55f | ||
|
|
d98eaa88b3 | ||
|
|
5b884042cd | ||
|
|
2265b61381 | ||
|
|
50fbfdc262 |
@@ -1,5 +1,5 @@
|
||||
# Copy this file to .env and fill in your values
|
||||
# Required for connecting to Pangolin service
|
||||
PANGOLIN_ENDPOINT=https://example.com
|
||||
PANGOLIN_ENDPOINT=https://app.pangolin.net
|
||||
NEWT_ID=changeme-id
|
||||
NEWT_SECRET=changeme-secret
|
||||
466
.github/workflows/cicd.yml
vendored
466
.github/workflows/cicd.yml
vendored
@@ -20,16 +20,6 @@ on:
|
||||
description: "SemVer version to release (e.g., 1.2.3, no leading 'v')"
|
||||
required: true
|
||||
type: string
|
||||
publish_latest:
|
||||
description: "Also publish the 'latest' image tag"
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
publish_minor:
|
||||
description: "Also publish the 'major.minor' image tag (e.g., 1.2)"
|
||||
required: true
|
||||
type: boolean
|
||||
default: false
|
||||
target_branch:
|
||||
description: "Branch to tag"
|
||||
required: false
|
||||
@@ -86,9 +76,6 @@ jobs:
|
||||
name: Build and Release
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
@@ -96,37 +83,6 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Capture created timestamp
|
||||
run: echo "IMAGE_CREATED=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Log in to GHCR
|
||||
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Normalize image names to lowercase
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
|
||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
|
||||
shell: bash
|
||||
|
||||
- name: Extract tag name
|
||||
env:
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
@@ -166,16 +122,6 @@ jobs:
|
||||
echo "Tag ${TAG} not visible after waiting"; exit 1
|
||||
shell: bash
|
||||
|
||||
- name: Update version in main.go
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ -f main.go ]; then
|
||||
sed -i 's/version_replaceme/'"$TAG"'/' main.go
|
||||
echo "Updated main.go with version $TAG"
|
||||
else
|
||||
echo "main.go not found"
|
||||
fi
|
||||
|
||||
- name: Ensure repository is at the tagged commit (dispatch only)
|
||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
||||
run: |
|
||||
@@ -200,38 +146,6 @@ jobs:
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Resolve publish-latest flag
|
||||
env:
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
PL_INPUT: ${{ inputs.publish_latest }}
|
||||
PL_VAR: ${{ vars.PUBLISH_LATEST }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
val="false"
|
||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
||||
if [ "${PL_INPUT}" = "true" ]; then val="true"; fi
|
||||
else
|
||||
if [ "${PL_VAR}" = "true" ]; then val="true"; fi
|
||||
fi
|
||||
echo "PUBLISH_LATEST=$val" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Resolve publish-minor flag
|
||||
env:
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
PM_INPUT: ${{ inputs.publish_minor }}
|
||||
PM_VAR: ${{ vars.PUBLISH_MINOR }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
val="false"
|
||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
||||
if [ "${PM_INPUT}" = "true" ]; then val="true"; fi
|
||||
else
|
||||
if [ "${PM_VAR}" = "true" ]; then val="true"; fi
|
||||
fi
|
||||
echo "PUBLISH_MINOR=$val" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Cache Go modules
|
||||
if: ${{ hashFiles('**/go.sum') != '' }}
|
||||
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
|
||||
@@ -250,326 +164,6 @@ jobs:
|
||||
go test ./... -race -covermode=atomic
|
||||
shell: bash
|
||||
|
||||
- name: Resolve license fallback
|
||||
run: echo "IMAGE_LICENSE=${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Resolve registries list (GHCR always, Docker Hub only if creds)
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
images="${GHCR_IMAGE}"
|
||||
if [ -n "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}" ] && [ -n "${{ secrets.DOCKER_HUB_USERNAME }}" ]; then
|
||||
images="${images}\n${DOCKERHUB_IMAGE}"
|
||||
fi
|
||||
{
|
||||
echo 'IMAGE_LIST<<EOF'
|
||||
echo -e "$images"
|
||||
echo 'EOF'
|
||||
} >> "$GITHUB_ENV"
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
|
||||
with:
|
||||
images: ${{ env.IMAGE_LIST }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}},value=${{ env.TAG }}
|
||||
type=semver,pattern={{major}}.{{minor}},value=${{ env.TAG }},enable=${{ env.PUBLISH_MINOR == 'true' && env.IS_RC != 'true' }}
|
||||
type=raw,value=latest,enable=${{ env.IS_RC != 'true' }}
|
||||
flavor: |
|
||||
latest=false
|
||||
labels: |
|
||||
org.opencontainers.image.title=${{ github.event.repository.name }}
|
||||
org.opencontainers.image.version=${{ env.TAG }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.source=${{ github.event.repository.html_url }}
|
||||
org.opencontainers.image.url=${{ github.event.repository.html_url }}
|
||||
org.opencontainers.image.documentation=${{ github.event.repository.html_url }}
|
||||
org.opencontainers.image.description=${{ github.event.repository.description }}
|
||||
org.opencontainers.image.licenses=${{ env.IMAGE_LICENSE }}
|
||||
org.opencontainers.image.created=${{ env.IMAGE_CREATED }}
|
||||
org.opencontainers.image.ref.name=${{ env.TAG }}
|
||||
org.opencontainers.image.authors=${{ github.repository_owner }}
|
||||
- name: Echo build config (non-secret)
|
||||
shell: bash
|
||||
env:
|
||||
IMAGE_TITLE: ${{ github.event.repository.name }}
|
||||
IMAGE_VERSION: ${{ env.TAG }}
|
||||
IMAGE_REVISION: ${{ github.sha }}
|
||||
IMAGE_SOURCE_URL: ${{ github.event.repository.html_url }}
|
||||
IMAGE_URL: ${{ github.event.repository.html_url }}
|
||||
IMAGE_DESCRIPTION: ${{ github.event.repository.description }}
|
||||
IMAGE_LICENSE: ${{ env.IMAGE_LICENSE }}
|
||||
DOCKERHUB_IMAGE: ${{ env.DOCKERHUB_IMAGE }}
|
||||
GHCR_IMAGE: ${{ env.GHCR_IMAGE }}
|
||||
DOCKER_HUB_USER: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
REPO: ${{ github.repository }}
|
||||
OWNER: ${{ github.repository_owner }}
|
||||
WORKFLOW_REF: ${{ github.workflow_ref }}
|
||||
REF: ${{ github.ref }}
|
||||
REF_NAME: ${{ github.ref_name }}
|
||||
RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "=== OCI Label Values ==="
|
||||
echo "org.opencontainers.image.title=${IMAGE_TITLE}"
|
||||
echo "org.opencontainers.image.version=${IMAGE_VERSION}"
|
||||
echo "org.opencontainers.image.revision=${IMAGE_REVISION}"
|
||||
echo "org.opencontainers.image.source=${IMAGE_SOURCE_URL}"
|
||||
echo "org.opencontainers.image.url=${IMAGE_URL}"
|
||||
echo "org.opencontainers.image.description=${IMAGE_DESCRIPTION}"
|
||||
echo "org.opencontainers.image.licenses=${IMAGE_LICENSE}"
|
||||
echo
|
||||
echo "=== Images ==="
|
||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE}"
|
||||
echo "GHCR_IMAGE=${GHCR_IMAGE}"
|
||||
echo "DOCKER_HUB_USERNAME=${DOCKER_HUB_USER}"
|
||||
echo
|
||||
echo "=== GitHub Kontext ==="
|
||||
echo "repository=${REPO}"
|
||||
echo "owner=${OWNER}"
|
||||
echo "workflow_ref=${WORKFLOW_REF}"
|
||||
echo "ref=${REF}"
|
||||
echo "ref_name=${REF_NAME}"
|
||||
echo "run_url=${RUN_URL}"
|
||||
echo
|
||||
echo "=== docker/metadata-action outputs (Tags/Labels), raw ==="
|
||||
echo "::group::tags"
|
||||
echo "${{ steps.meta.outputs.tags }}"
|
||||
echo "::endgroup::"
|
||||
echo "::group::labels"
|
||||
echo "${{ steps.meta.outputs.labels }}"
|
||||
echo "::endgroup::"
|
||||
- name: Build and push (Docker Hub + GHCR)
|
||||
id: build
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64,linux/arm/v7
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha,scope=${{ github.repository }}
|
||||
cache-to: type=gha,mode=max,scope=${{ github.repository }}
|
||||
provenance: mode=max
|
||||
sbom: true
|
||||
|
||||
- name: Compute image digest refs
|
||||
run: |
|
||||
echo "DIGEST=${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
|
||||
echo "GHCR_REF=$GHCR_IMAGE@${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
|
||||
echo "DH_REF=$DOCKERHUB_IMAGE@${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
|
||||
echo "Built digest: ${{ steps.build.outputs.digest }}"
|
||||
shell: bash
|
||||
|
||||
- name: Attest build provenance (GHCR)
|
||||
id: attest-ghcr
|
||||
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
|
||||
with:
|
||||
subject-name: ${{ env.GHCR_IMAGE }}
|
||||
subject-digest: ${{ steps.build.outputs.digest }}
|
||||
push-to-registry: true
|
||||
show-summary: true
|
||||
|
||||
- name: Attest build provenance (Docker Hub)
|
||||
continue-on-error: true
|
||||
id: attest-dh
|
||||
uses: actions/attest-build-provenance@00014ed6ed5efc5b1ab7f7f34a39eb55d41aa4f8 # v3.1.0
|
||||
with:
|
||||
subject-name: index.docker.io/fosrl/${{ github.event.repository.name }}
|
||||
subject-digest: ${{ steps.build.outputs.digest }}
|
||||
push-to-registry: true
|
||||
show-summary: true
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
with:
|
||||
cosign-release: 'v3.0.2'
|
||||
|
||||
- name: Sanity check cosign private key
|
||||
env:
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cosign public-key --key env://COSIGN_PRIVATE_KEY >/dev/null
|
||||
shell: bash
|
||||
|
||||
- name: Sign GHCR image (digest) with key (recursive)
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Signing ${GHCR_REF} (digest) recursively with provided key"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${GHCR_REF}"
|
||||
echo "Waiting 30 seconds for signatures to propagate..."
|
||||
sleep 30
|
||||
shell: bash
|
||||
|
||||
- name: Generate SBOM (SPDX JSON)
|
||||
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
|
||||
with:
|
||||
image-ref: ${{ env.GHCR_IMAGE }}@${{ steps.build.outputs.digest }}
|
||||
format: spdx-json
|
||||
output: sbom.spdx.json
|
||||
|
||||
- name: Validate SBOM JSON
|
||||
run: jq -e . sbom.spdx.json >/dev/null
|
||||
shell: bash
|
||||
|
||||
- name: Minify SBOM JSON (optional hardening)
|
||||
run: jq -c . sbom.spdx.json > sbom.min.json && mv sbom.min.json sbom.spdx.json
|
||||
shell: bash
|
||||
|
||||
- name: Create SBOM attestation (GHCR, private key)
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cosign attest \
|
||||
--key env://COSIGN_PRIVATE_KEY \
|
||||
--type spdxjson \
|
||||
--predicate sbom.spdx.json \
|
||||
"${GHCR_REF}"
|
||||
shell: bash
|
||||
|
||||
- name: Create SBOM attestation (Docker Hub, private key)
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
cosign attest \
|
||||
--key env://COSIGN_PRIVATE_KEY \
|
||||
--type spdxjson \
|
||||
--predicate sbom.spdx.json \
|
||||
"${DH_REF}"
|
||||
shell: bash
|
||||
|
||||
- name: Keyless sign & verify GHCR digest (OIDC)
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
WORKFLOW_REF: ${{ github.workflow_ref }} # owner/repo/.github/workflows/<file>@refs/tags/<tag>
|
||||
ISSUER: https://token.actions.githubusercontent.com
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Keyless signing ${GHCR_REF}"
|
||||
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${GHCR_REF}"
|
||||
echo "Verify keyless (OIDC) signature policy on ${GHCR_REF}"
|
||||
cosign verify \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${WORKFLOW_REF}" \
|
||||
"${GHCR_REF}" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Sign Docker Hub image (digest) with key (recursive)
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Signing ${DH_REF} (digest) recursively with provided key (Docker media types fallback)"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${DH_REF}"
|
||||
shell: bash
|
||||
|
||||
- name: Keyless sign & verify Docker Hub digest (OIDC)
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_YES: "true"
|
||||
ISSUER: https://token.actions.githubusercontent.com
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Keyless signing ${DH_REF} (force public-good Rekor)"
|
||||
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${DH_REF}"
|
||||
echo "Keyless verify via Rekor (strict identity)"
|
||||
if ! cosign verify \
|
||||
--rekor-url https://rekor.sigstore.dev \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
||||
"${DH_REF}" -o text; then
|
||||
echo "Rekor verify failed — retry offline bundle verify (no Rekor)"
|
||||
if ! cosign verify \
|
||||
--offline \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
||||
"${DH_REF}" -o text; then
|
||||
echo "Offline bundle verify failed — ignore tlog (TEMP for debugging)"
|
||||
cosign verify \
|
||||
--insecure-ignore-tlog=true \
|
||||
--certificate-oidc-issuer "${ISSUER}" \
|
||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
||||
"${DH_REF}" -o text || true
|
||||
fi
|
||||
fi
|
||||
- name: Verify signature (public key) GHCR digest + tag
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TAG_VAR="${TAG}"
|
||||
echo "Verifying (digest) ${GHCR_REF}"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "$GHCR_REF" -o text
|
||||
echo "Verifying (tag) $GHCR_IMAGE:$TAG_VAR"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "$GHCR_IMAGE:$TAG_VAR" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify SBOM attestation (GHCR)
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
run: cosign verify-attestation --key env://COSIGN_PUBLIC_KEY --type spdxjson "$GHCR_REF" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify SLSA provenance (GHCR)
|
||||
env:
|
||||
ISSUER: https://token.actions.githubusercontent.com
|
||||
WFREF: ${{ github.workflow_ref }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
# (optional) show which predicate types are present to aid debugging
|
||||
cosign download attestation "$GHCR_REF" \
|
||||
| jq -r '.payload | @base64d | fromjson | .predicateType' | sort -u || true
|
||||
# Verify the SLSA v1 provenance attestation (predicate URL)
|
||||
cosign verify-attestation \
|
||||
--type 'https://slsa.dev/provenance/v1' \
|
||||
--certificate-oidc-issuer "$ISSUER" \
|
||||
--certificate-identity "https://github.com/${WFREF}" \
|
||||
--rekor-url https://rekor.sigstore.dev \
|
||||
"$GHCR_REF" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify signature (public key) Docker Hub digest
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Verifying (digest) ${DH_REF} with Docker media types"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${DH_REF}" -o text
|
||||
shell: bash
|
||||
|
||||
- name: Verify signature (public key) Docker Hub tag
|
||||
continue-on-error: true
|
||||
env:
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
echo "Verifying (tag) $DOCKERHUB_IMAGE:$TAG with Docker media types"
|
||||
cosign verify --key env://COSIGN_PUBLIC_KEY "$DOCKERHUB_IMAGE:$TAG" -o text
|
||||
shell: bash
|
||||
|
||||
# - name: Trivy scan (GHCR image)
|
||||
# id: trivy
|
||||
# uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
|
||||
@@ -589,28 +183,46 @@ jobs:
|
||||
# sarif_file: trivy-ghcr.sarif
|
||||
# category: Image Vulnerability Scan
|
||||
|
||||
- name: Build binaries
|
||||
env:
|
||||
CGO_ENABLED: "0"
|
||||
GOFLAGS: "-trimpath"
|
||||
#- name: Build binaries
|
||||
# env:
|
||||
# CGO_ENABLED: "0"
|
||||
# GOFLAGS: "-trimpath"
|
||||
# run: |
|
||||
# set -euo pipefail
|
||||
# TAG_VAR="${TAG}"
|
||||
# make -j 10 go-build-release tag=$TAG_VAR
|
||||
# shell: bash
|
||||
|
||||
- name: Ensure clean git state for GoReleaser
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TAG_VAR="${TAG}"
|
||||
make -j 10 go-build-release tag=$TAG_VAR
|
||||
shell: bash
|
||||
echo "Checking git status before GoReleaser..."
|
||||
git status --porcelain || true
|
||||
if [ -n "$(git status --porcelain)" ]; then
|
||||
echo "Repository contains local changes. Listing files and diff:"
|
||||
git status --porcelain
|
||||
git --no-pager diff --name-status || true
|
||||
echo "Resetting tracked files to HEAD to ensure a clean release state"
|
||||
git restore --source=HEAD --worktree --staged -- .
|
||||
echo "After reset git status:"
|
||||
git status --porcelain || true
|
||||
else
|
||||
echo "Repository clean."
|
||||
fi
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@5be0e66d93ac7ed76da52eca8bb058f665c3a5fe # v2.4.2
|
||||
- name: Run GoReleaser config check
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
tag_name: ${{ env.TAG }}
|
||||
generate_release_notes: true
|
||||
prerelease: ${{ env.IS_RC == 'true' }}
|
||||
files: |
|
||||
bin/*
|
||||
fail_on_unmatched_files: true
|
||||
draft: true
|
||||
body: |
|
||||
## Container Images
|
||||
- GHCR: `${{ env.GHCR_REF }}`
|
||||
- Docker Hub: `${{ env.DH_REF || 'N/A' }}`
|
||||
**Digest:** `${{ steps.build.outputs.digest }}`
|
||||
version: 2.14.0
|
||||
args: check
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Run GoReleaser (binaries + deb/rpm/apk)
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
version: 2.14.0
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
20
.github/workflows/mirror.yaml
vendored
20
.github/workflows/mirror.yaml
vendored
@@ -1,20 +1,28 @@
|
||||
name: Mirror & Sign (Docker Hub to GHCR)
|
||||
|
||||
on:
|
||||
workflow_dispatch: {}
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
source_image:
|
||||
description: "Source image (e.g., docker.io/owner/newt)"
|
||||
required: true
|
||||
type: string
|
||||
dest_image:
|
||||
description: "Destination image (e.g., ghcr.io/owner/newt)"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write # for keyless OIDC
|
||||
|
||||
env:
|
||||
SOURCE_IMAGE: docker.io/fosrl/newt
|
||||
DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
jobs:
|
||||
mirror-and-dual-sign:
|
||||
runs-on: amd64-runner
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
SOURCE_IMAGE: ${{ inputs.source_image }}
|
||||
DEST_IMAGE: ${{ inputs.dest_image }}
|
||||
steps:
|
||||
- name: Install skopeo + jq
|
||||
run: |
|
||||
|
||||
64
.github/workflows/publish-apt.yml
vendored
Normal file
64
.github/workflows/publish-apt.yml
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
name: Publish APT repo to S3/CloudFront
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+"
|
||||
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Tag to publish (e.g. 1.9.0). Leave empty to use latest release."
|
||||
required: false
|
||||
type: string
|
||||
backfill_all:
|
||||
description: "Build/publish repo for ALL releases."
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
PKG_NAME: newt
|
||||
SUITE: stable
|
||||
COMPONENT: main
|
||||
REPO_BASE_URL: https://repo.dev.fosrl.io/apt
|
||||
|
||||
AWS_REGION: ${{ vars.AWS_REGION }}
|
||||
S3_BUCKET: ${{ vars.S3_BUCKET }}
|
||||
S3_PREFIX: ${{ vars.S3_PREFIX }}
|
||||
CLOUDFRONT_DISTRIBUTION_ID: ${{ vars.CLOUDFRONT_DISTRIBUTION_ID }}
|
||||
|
||||
INPUT_TAG: ${{ inputs.tag }}
|
||||
BACKFILL_ALL: ${{ inputs.backfill_all }}
|
||||
EVENT_TAG: ${{ github.event.release.tag_name }}
|
||||
PUSH_TAG: ${{ github.ref_name }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Configure AWS credentials (OIDC)
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
|
||||
aws-region: ${{ vars.AWS_REGION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: sudo apt-get update && sudo apt-get install -y dpkg-dev apt-utils gnupg curl jq gh
|
||||
|
||||
- name: Publish APT repo
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
APT_GPG_PRIVATE_KEY: ${{ secrets.APT_GPG_PRIVATE_KEY }}
|
||||
APT_GPG_PASSPHRASE: ${{ secrets.APT_GPG_PASSPHRASE }}
|
||||
run: ./scripts/publish-apt.sh
|
||||
2
.github/workflows/test.yml
vendored
2
.github/workflows/test.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
matrix:
|
||||
target:
|
||||
- local
|
||||
- docker-build
|
||||
#- docker-build
|
||||
- go-build-release-darwin-amd64
|
||||
- go-build-release-darwin-arm64
|
||||
- go-build-release-freebsd-amd64
|
||||
|
||||
43
.goreleaser.yaml
Normal file
43
.goreleaser.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
version: 2
|
||||
project_name: newt
|
||||
|
||||
release:
|
||||
draft: true
|
||||
prerelease: "{{ contains .Tag \"-rc.\" }}"
|
||||
name_template: "{{ .Tag }}"
|
||||
|
||||
builds:
|
||||
- id: newt
|
||||
main: ./main.go
|
||||
binary: newt
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -s -w -X main.newtVersion={{ .Tag }}
|
||||
|
||||
checksum:
|
||||
name_template: "checksums.txt"
|
||||
|
||||
nfpms:
|
||||
- id: packages
|
||||
package_name: newt
|
||||
vendor: fosrl
|
||||
maintainer: fosrl <repo@fosrl.io>
|
||||
description: Newt - userspace tunnel client and TCP/UDP proxy
|
||||
license: AGPL-3.0-or-later
|
||||
formats:
|
||||
- deb
|
||||
- rpm
|
||||
- apk
|
||||
bindir: /usr/bin
|
||||
file_name_template: "newt_{{ .Version }}_{{ .Arch }}"
|
||||
contents:
|
||||
- src: LICENSE
|
||||
dst: /usr/share/doc/newt/LICENSE
|
||||
13
README.md
13
README.md
@@ -1,15 +1,24 @@
|
||||
# Newt
|
||||
|
||||
[](https://pkg.go.dev/github.com/fosrl/newt)
|
||||
[](https://github.com/fosrl/newt/blob/main/LICENSE)
|
||||
[](https://goreportcard.com/report/github.com/fosrl/newt)
|
||||
|
||||
Newt is a fully user space [WireGuard](https://www.wireguard.com/) tunnel client and TCP/UDP proxy, designed to securely expose private resources controlled by Pangolin. By using Newt, you don't need to manage complex WireGuard tunnels and NATing.
|
||||
|
||||
### Installation and Documentation
|
||||
## Installation and Documentation
|
||||
|
||||
Newt is used with Pangolin and Gerbil as part of the larger system. See documentation below:
|
||||
|
||||
- [Full Documentation](https://docs.pangolin.net/manage/sites/understanding-sites)
|
||||
- [Full Documentation](https://docs.pangolin.net/manage/sites/understanding-sites)
|
||||
|
||||
### Install via APT (Debian/Ubuntu)
|
||||
|
||||
```bash
|
||||
curl -fsSL https://repo.dev.fosrl.io/apt/public.key | sudo gpg --dearmor -o /usr/share/keyrings/newt-archive-keyring.gpg
|
||||
echo "deb [signed-by=/usr/share/keyrings/newt-archive-keyring.gpg] https://repo.dev.fosrl.io/apt stable main" | sudo tee /etc/apt/sources.list.d/newt.list
|
||||
sudo apt update && sudo apt install newt
|
||||
```
|
||||
|
||||
## Key Functions
|
||||
|
||||
|
||||
151
authdaemon.go
Normal file
151
authdaemon.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/fosrl/newt/authdaemon"
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultPrincipalsPath = "/var/run/auth-daemon/principals"
|
||||
defaultCACertPath = "/etc/ssh/ca.pem"
|
||||
)
|
||||
|
||||
var (
|
||||
errPresharedKeyRequired = errors.New("auth-daemon-key is required when --auth-daemon is enabled")
|
||||
errRootRequired = errors.New("auth-daemon must be run as root (use sudo)")
|
||||
authDaemonServer *authdaemon.Server // Global auth daemon server instance
|
||||
)
|
||||
|
||||
// startAuthDaemon initializes and starts the auth daemon in the background.
|
||||
// It validates requirements (Linux, root, preshared key) and starts the server
|
||||
// in a goroutine so it runs alongside normal newt operation.
|
||||
func startAuthDaemon(ctx context.Context) error {
|
||||
// Validation
|
||||
if runtime.GOOS != "linux" {
|
||||
return fmt.Errorf("auth-daemon is only supported on Linux, not %s", runtime.GOOS)
|
||||
}
|
||||
if os.Geteuid() != 0 {
|
||||
return errRootRequired
|
||||
}
|
||||
|
||||
// Use defaults if not set
|
||||
principalsFile := authDaemonPrincipalsFile
|
||||
if principalsFile == "" {
|
||||
principalsFile = defaultPrincipalsPath
|
||||
}
|
||||
caCertPath := authDaemonCACertPath
|
||||
if caCertPath == "" {
|
||||
caCertPath = defaultCACertPath
|
||||
}
|
||||
|
||||
// Create auth daemon server
|
||||
cfg := authdaemon.Config{
|
||||
DisableHTTPS: true, // We run without HTTP server in newt
|
||||
PresharedKey: "this-key-is-not-used", // Not used in embedded mode, but set to non-empty to satisfy validation
|
||||
PrincipalsFilePath: principalsFile,
|
||||
CACertPath: caCertPath,
|
||||
Force: true,
|
||||
}
|
||||
|
||||
srv, err := authdaemon.NewServer(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create auth daemon server: %w", err)
|
||||
}
|
||||
|
||||
authDaemonServer = srv
|
||||
|
||||
// Start the auth daemon in a goroutine so it runs alongside newt
|
||||
go func() {
|
||||
logger.Info("Auth daemon starting (native mode, no HTTP server)")
|
||||
if err := srv.Run(ctx); err != nil {
|
||||
logger.Error("Auth daemon error: %v", err)
|
||||
}
|
||||
logger.Info("Auth daemon stopped")
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
|
||||
// runPrincipalsCmd executes the principals subcommand logic
|
||||
func runPrincipalsCmd(args []string) {
|
||||
opts := struct {
|
||||
PrincipalsFile string
|
||||
Username string
|
||||
}{
|
||||
PrincipalsFile: defaultPrincipalsPath,
|
||||
}
|
||||
|
||||
// Parse flags manually
|
||||
for i := 0; i < len(args); i++ {
|
||||
switch args[i] {
|
||||
case "--principals-file":
|
||||
if i+1 >= len(args) {
|
||||
fmt.Fprintf(os.Stderr, "Error: --principals-file requires a value\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
opts.PrincipalsFile = args[i+1]
|
||||
i++
|
||||
case "--username":
|
||||
if i+1 >= len(args) {
|
||||
fmt.Fprintf(os.Stderr, "Error: --username requires a value\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
opts.Username = args[i+1]
|
||||
i++
|
||||
case "--help", "-h":
|
||||
printPrincipalsHelp()
|
||||
os.Exit(0)
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "Error: unknown flag: %s\n", args[i])
|
||||
printPrincipalsHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Validation
|
||||
if opts.Username == "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: username is required\n")
|
||||
printPrincipalsHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Get principals
|
||||
list, err := authdaemon.GetPrincipals(opts.PrincipalsFile, opts.Username)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if len(list) == 0 {
|
||||
fmt.Println("")
|
||||
return
|
||||
}
|
||||
for _, principal := range list {
|
||||
fmt.Println(principal)
|
||||
}
|
||||
}
|
||||
|
||||
func printPrincipalsHelp() {
|
||||
fmt.Fprintf(os.Stderr, `Usage: newt principals [flags]
|
||||
|
||||
Output principals for a username (for AuthorizedPrincipalsCommand in sshd_config).
|
||||
Read the principals file and print principals that match the given username, one per line.
|
||||
Configure in sshd_config with AuthorizedPrincipalsCommand and %%u for the username.
|
||||
|
||||
Flags:
|
||||
--principals-file string Path to the principals file (default "%s")
|
||||
--username string Username to look up (required)
|
||||
--help, -h Show this help message
|
||||
|
||||
Example:
|
||||
newt principals --username alice
|
||||
|
||||
`, defaultPrincipalsPath)
|
||||
}
|
||||
27
authdaemon/connection.go
Normal file
27
authdaemon/connection.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// ProcessConnection runs the same logic as POST /connection: CA cert, user create/reconcile, principals.
|
||||
// Use this when DisableHTTPS is true (e.g. embedded in Newt) instead of calling the API.
|
||||
func (s *Server) ProcessConnection(req ConnectionRequest) {
|
||||
logger.Info("connection: niceId=%q username=%q metadata.sudo=%v metadata.homedir=%v",
|
||||
req.NiceId, req.Username, req.Metadata.Sudo, req.Metadata.Homedir)
|
||||
|
||||
cfg := &s.cfg
|
||||
if cfg.CACertPath != "" {
|
||||
if err := writeCACertIfNotExists(cfg.CACertPath, req.CaCert, cfg.Force); err != nil {
|
||||
logger.Warn("auth-daemon: write CA cert: %v", err)
|
||||
}
|
||||
}
|
||||
if err := ensureUser(req.Username, req.Metadata); err != nil {
|
||||
logger.Warn("auth-daemon: ensure user: %v", err)
|
||||
}
|
||||
if cfg.PrincipalsFilePath != "" {
|
||||
if err := writePrincipals(cfg.PrincipalsFilePath, req.Username, req.NiceId); err != nil {
|
||||
logger.Warn("auth-daemon: write principals: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
224
authdaemon/host_linux.go
Normal file
224
authdaemon/host_linux.go
Normal file
@@ -0,0 +1,224 @@
|
||||
//go:build linux
|
||||
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// writeCACertIfNotExists writes contents to path. If the file already exists: when force is false, skip; when force is true, overwrite only if content differs.
|
||||
func writeCACertIfNotExists(path, contents string, force bool) error {
|
||||
contents = strings.TrimSpace(contents)
|
||||
if contents != "" && !strings.HasSuffix(contents, "\n") {
|
||||
contents += "\n"
|
||||
}
|
||||
existing, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
existingStr := strings.TrimSpace(string(existing))
|
||||
if existingStr != "" && !strings.HasSuffix(existingStr, "\n") {
|
||||
existingStr += "\n"
|
||||
}
|
||||
if existingStr == contents {
|
||||
logger.Debug("auth-daemon: CA cert unchanged at %s, skipping write", path)
|
||||
return nil
|
||||
}
|
||||
if !force {
|
||||
logger.Debug("auth-daemon: CA cert already exists at %s, skipping write (Force disabled)", path)
|
||||
return nil
|
||||
}
|
||||
} else if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("mkdir %s: %w", dir, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte(contents), 0644); err != nil {
|
||||
return fmt.Errorf("write CA cert: %w", err)
|
||||
}
|
||||
logger.Info("auth-daemon: wrote CA cert to %s", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// writePrincipals updates the principals file at path: JSON object keyed by username, value is array of principals. Adds username and niceId to that user's list (deduped).
|
||||
func writePrincipals(path, username, niceId string) error {
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
username = strings.TrimSpace(username)
|
||||
niceId = strings.TrimSpace(niceId)
|
||||
if username == "" {
|
||||
return nil
|
||||
}
|
||||
data := make(map[string][]string)
|
||||
if raw, err := os.ReadFile(path); err == nil {
|
||||
_ = json.Unmarshal(raw, &data)
|
||||
}
|
||||
list := data[username]
|
||||
seen := make(map[string]struct{}, len(list)+2)
|
||||
for _, p := range list {
|
||||
seen[p] = struct{}{}
|
||||
}
|
||||
for _, p := range []string{username, niceId} {
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[p]; !ok {
|
||||
seen[p] = struct{}{}
|
||||
list = append(list, p)
|
||||
}
|
||||
}
|
||||
data[username] = list
|
||||
body, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal principals: %w", err)
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("mkdir %s: %w", dir, err)
|
||||
}
|
||||
if err := os.WriteFile(path, body, 0644); err != nil {
|
||||
return fmt.Errorf("write principals: %w", err)
|
||||
}
|
||||
logger.Debug("auth-daemon: wrote principals to %s", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sudoGroup returns the name of the sudo group (wheel or sudo) that exists on the system.
// Prefers wheel; returns "sudo" when wheel is absent or /etc/group cannot be read.
func sudoGroup() string {
	f, err := os.Open("/etc/group")
	if err != nil {
		return "sudo"
	}
	defer f.Close()

	// wheel always wins when present, so we can stop at the first match;
	// every other outcome (sudo present, neither present, scan error) is "sudo".
	for sc := bufio.NewScanner(f); sc.Scan(); {
		if strings.HasPrefix(sc.Text(), "wheel:") {
			return "wheel"
		}
	}
	return "sudo"
}
|
||||
|
||||
// ensureUser creates the system user if missing, or reconciles sudo and homedir to match meta.
|
||||
func ensureUser(username string, meta ConnectionMetadata) error {
|
||||
if username == "" {
|
||||
return nil
|
||||
}
|
||||
u, err := user.Lookup(username)
|
||||
if err != nil {
|
||||
if _, ok := err.(user.UnknownUserError); !ok {
|
||||
return fmt.Errorf("lookup user %s: %w", username, err)
|
||||
}
|
||||
return createUser(username, meta)
|
||||
}
|
||||
return reconcileUser(u, meta)
|
||||
}
|
||||
|
||||
func createUser(username string, meta ConnectionMetadata) error {
|
||||
args := []string{"-s", "/bin/bash"}
|
||||
if meta.Homedir {
|
||||
args = append(args, "-m")
|
||||
} else {
|
||||
args = append(args, "-M")
|
||||
}
|
||||
args = append(args, username)
|
||||
cmd := exec.Command("useradd", args...)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("useradd %s: %w (output: %s)", username, err, string(out))
|
||||
}
|
||||
logger.Info("auth-daemon: created user %s (homedir=%v)", username, meta.Homedir)
|
||||
if meta.Sudo {
|
||||
group := sudoGroup()
|
||||
cmd := exec.Command("usermod", "-aG", group, username)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, username, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: added %s to %s", username, group)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mustAtoi converts s to an int, ignoring the conversion error. On a syntax
// error the result is 0; on a range error it is the saturated value that
// strconv.Atoi returns alongside the error.
func mustAtoi(s string) int {
	value, _ := strconv.Atoi(s)
	return value
}
|
||||
|
||||
// reconcileUser brings an existing user's sudo-group membership and home
// directory in line with meta. All remediation is best-effort: failures are
// logged as warnings and never abort the remaining steps; the function
// always returns nil.
func reconcileUser(u *user.User, meta ConnectionMetadata) error {
	group := sudoGroup()
	inGroup, err := userInGroup(u.Username, group)
	if err != nil {
		// If membership cannot be determined, assume not a member so that
		// meta.Sudo=true still attempts the add below.
		logger.Warn("auth-daemon: check group %s: %v", group, err)
		inGroup = false
	}
	if meta.Sudo && !inGroup {
		// Grant sudo: append the user to the sudo/wheel group.
		cmd := exec.Command("usermod", "-aG", group, u.Username)
		if out, err := cmd.CombinedOutput(); err != nil {
			logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, u.Username, err, string(out))
		} else {
			logger.Info("auth-daemon: added %s to %s", u.Username, group)
		}
	} else if !meta.Sudo && inGroup {
		// Revoke sudo: remove the user from the group.
		cmd := exec.Command("gpasswd", "-d", u.Username, group)
		if out, err := cmd.CombinedOutput(); err != nil {
			logger.Warn("auth-daemon: gpasswd -d %s %s: %v (output: %s)", u.Username, group, err, string(out))
		} else {
			logger.Info("auth-daemon: removed %s from %s", u.Username, group)
		}
	}
	if meta.Homedir && u.HomeDir != "" {
		// Create the home directory if missing (or the path is not a directory)
		// and hand ownership to the user. NOTE(review): the directory is created
		// empty (no /etc/skel copy), and Homedir=false does not remove an
		// existing home — confirm both are intended.
		if st, err := os.Stat(u.HomeDir); err != nil || !st.IsDir() {
			if err := os.MkdirAll(u.HomeDir, 0755); err != nil {
				logger.Warn("auth-daemon: mkdir %s: %v", u.HomeDir, err)
			} else {
				// Uid/Gid are decimal strings on Linux; mustAtoi ignores parse errors.
				uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
				_ = os.Chown(u.HomeDir, uid, gid)
				logger.Info("auth-daemon: created home %s for %s", u.HomeDir, u.Username)
			}
		}
	}
	return nil
}
|
||||
|
||||
// userInGroup reports whether username appears in groupName's member list as
// printed by `getent group <name>`. A malformed getent line (fewer than four
// fields) is treated as "not a member".
//
// NOTE(review): the fourth getent field lists only supplementary members — a
// user whose *primary* group is groupName will not appear there; confirm that
// is acceptable for the sudo/wheel check.
func userInGroup(username, groupName string) (bool, error) {
	// getent group wheel returns "wheel:x:10:user1,user2"
	cmd := exec.Command("getent", "group", groupName)
	out, err := cmd.Output()
	if err != nil {
		// getent exits non-zero when the group does not exist.
		return false, err
	}
	parts := strings.SplitN(strings.TrimSpace(string(out)), ":", 4)
	if len(parts) < 4 {
		return false, nil
	}
	members := strings.Split(parts[3], ",")
	for _, m := range members {
		if strings.TrimSpace(m) == username {
			return true, nil
		}
	}
	return false, nil
}
|
||||
22
authdaemon/host_stub.go
Normal file
22
authdaemon/host_stub.go
Normal file
@@ -0,0 +1,22 @@
|
||||
//go:build !linux
|
||||
|
||||
package authdaemon
|
||||
|
||||
import "fmt"
|
||||
|
||||
// errLinuxOnly is returned by every host operation in this non-Linux build;
// the Linux implementations live in host_linux.go behind a build tag.
var errLinuxOnly = fmt.Errorf("auth-daemon PAM agent is only supported on Linux")

// writeCACertIfNotExists returns an error on non-Linux.
func writeCACertIfNotExists(path, contents string, force bool) error {
	return errLinuxOnly
}

// ensureUser returns an error on non-Linux.
func ensureUser(username string, meta ConnectionMetadata) error {
	return errLinuxOnly
}

// writePrincipals returns an error on non-Linux.
func writePrincipals(path, username, niceId string) error {
	return errLinuxOnly
}
|
||||
28
authdaemon/principals.go
Normal file
28
authdaemon/principals.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// GetPrincipals reads the principals data file at path, looks up the given user, and returns that user's principals as a string slice.
|
||||
// The file format is JSON: object with username keys and array-of-principals values, e.g. {"alice":["alice","usr-123"],"bob":["bob","usr-456"]}.
|
||||
// If the user is not found or the file is missing, returns nil and nil.
|
||||
func GetPrincipals(path, user string) ([]string, error) {
|
||||
if path == "" {
|
||||
return nil, fmt.Errorf("principals file path is required")
|
||||
}
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("read principals file: %w", err)
|
||||
}
|
||||
var m map[string][]string
|
||||
if err := json.Unmarshal(data, &m); err != nil {
|
||||
return nil, fmt.Errorf("parse principals file: %w", err)
|
||||
}
|
||||
return m[user], nil
|
||||
}
|
||||
56
authdaemon/routes.go
Normal file
56
authdaemon/routes.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// registerRoutes registers all API routes. Add new endpoints here.
// GET /health is a liveness probe; POST /connection processes a connection
// event. Authentication is enforced once for all routes by authMiddleware
// (applied in Run), not per-handler.
func (s *Server) registerRoutes() {
	s.mux.HandleFunc("/health", s.handleHealth)
	s.mux.HandleFunc("/connection", s.handleConnection)
}
|
||||
|
||||
// ConnectionMetadata is the metadata object in POST /connection.
type ConnectionMetadata struct {
	Sudo    bool `json:"sudo"`    // grant membership in the system's sudo/wheel group
	Homedir bool `json:"homedir"` // create the user's home directory
}

// ConnectionRequest is the JSON body for POST /connection.
type ConnectionRequest struct {
	CaCert   string             `json:"caCert"`   // CA certificate contents to write to Config.CACertPath
	NiceId   string             `json:"niceId"`   // identifier added to the user's principals list
	Username string             `json:"username"` // system account to create or reconcile
	Metadata ConnectionMetadata `json:"metadata"`
}

// healthResponse is the JSON body for GET /health.
type healthResponse struct {
	Status string `json:"status"` // always "ok" (see handleHealth)
}
|
||||
|
||||
// handleHealth responds with 200 and {"status":"ok"}.
|
||||
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(healthResponse{Status: "ok"})
|
||||
}
|
||||
|
||||
// handleConnection accepts POST with connection payload and delegates to ProcessConnection.
|
||||
func (s *Server) handleConnection(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
var req ConnectionRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
s.ProcessConnection(req)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
179
authdaemon/server.go
Normal file
179
authdaemon/server.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/subtle"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// Config holds all auth-daemon settings. There are no defaults: see the
// per-field requirements below (NewServer validates them).
type Config struct {
	// DisableHTTPS: when true, Run() does not start the HTTPS server (for embedded use inside Newt). Call ProcessConnection directly for connection events.
	DisableHTTPS       bool
	Port               int    // Required when DisableHTTPS is false. Listen port for the HTTPS server. No default.
	PresharedKey       string // Required when DisableHTTPS is false. HTTP auth (Authorization: Bearer <key> or X-Preshared-Key: <key>). No default.
	CACertPath         string // Required. Where to write the CA cert (e.g. /etc/ssh/ca.pem). No default.
	Force              bool   // If true, overwrite existing CA cert (and other items) when content differs. Default false.
	PrincipalsFilePath string // Required. Path to the principals data file (JSON: username -> array of principals). No default.
}
|
||||
|
||||
// Server is the auth-daemon: host-side logic (CA cert install, user
// reconciliation, principals file) plus an optional HTTPS API.
// The HTTP-related fields are only populated when Config.DisableHTTPS is false.
type Server struct {
	cfg          Config          // copy of the caller's (validated) configuration
	addr         string          // listen address ":<port>"; empty in embedded mode
	presharedKey string          // bearer key checked by authMiddleware
	mux          *http.ServeMux  // API routes; nil in embedded mode
	tlsCert      tls.Certificate // in-memory self-signed certificate for the listener
}
|
||||
|
||||
// generateTLSCert creates a self-signed certificate and key in memory (no disk).
|
||||
func generateTLSCert() (tls.Certificate, error) {
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("generate key: %w", err)
|
||||
}
|
||||
serial, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("serial: %w", err)
|
||||
}
|
||||
tmpl := &x509.Certificate{
|
||||
SerialNumber: serial,
|
||||
Subject: pkix.Name{
|
||||
CommonName: "localhost",
|
||||
},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(365 * 24 * time.Hour),
|
||||
KeyUsage: x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
DNSNames: []string{"localhost", "127.0.0.1"},
|
||||
}
|
||||
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("create certificate: %w", err)
|
||||
}
|
||||
keyDER, err := x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("marshal key: %w", err)
|
||||
}
|
||||
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
|
||||
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
|
||||
cert, err := tls.X509KeyPair(certPEM, keyPEM)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("x509 key pair: %w", err)
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// authMiddleware wraps next and requires a valid preshared key on every request.
|
||||
// Accepts Authorization: Bearer <key> or X-Preshared-Key: <key>.
|
||||
func (s *Server) authMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
key := ""
|
||||
if v := r.Header.Get("Authorization"); strings.HasPrefix(v, "Bearer ") {
|
||||
key = strings.TrimSpace(strings.TrimPrefix(v, "Bearer "))
|
||||
}
|
||||
if key == "" {
|
||||
key = strings.TrimSpace(r.Header.Get("X-Preshared-Key"))
|
||||
}
|
||||
if key == "" || subtle.ConstantTimeCompare([]byte(key), []byte(s.presharedKey)) != 1 {
|
||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// NewServer builds a new auth-daemon server from cfg. Port, PresharedKey, CACertPath, and PrincipalsFilePath are required (no defaults).
|
||||
func NewServer(cfg Config) (*Server, error) {
|
||||
if runtime.GOOS != "linux" {
|
||||
return nil, fmt.Errorf("auth-daemon is only supported on Linux, not %s", runtime.GOOS)
|
||||
}
|
||||
if !cfg.DisableHTTPS {
|
||||
if cfg.Port <= 0 {
|
||||
return nil, fmt.Errorf("port is required and must be positive")
|
||||
}
|
||||
if cfg.PresharedKey == "" {
|
||||
return nil, fmt.Errorf("preshared key is required")
|
||||
}
|
||||
}
|
||||
if cfg.CACertPath == "" {
|
||||
return nil, fmt.Errorf("CACertPath is required")
|
||||
}
|
||||
if cfg.PrincipalsFilePath == "" {
|
||||
return nil, fmt.Errorf("PrincipalsFilePath is required")
|
||||
}
|
||||
s := &Server{cfg: cfg}
|
||||
if !cfg.DisableHTTPS {
|
||||
cert, err := generateTLSCert()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.addr = fmt.Sprintf(":%d", cfg.Port)
|
||||
s.presharedKey = cfg.PresharedKey
|
||||
s.mux = http.NewServeMux()
|
||||
s.tlsCert = cert
|
||||
s.registerRoutes()
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Run starts the HTTPS server (unless DisableHTTPS) and blocks until ctx is cancelled or the server errors.
// When DisableHTTPS is true, Run() blocks on ctx only and does not listen; use ProcessConnection for connection events.
// In both modes the principals file is removed before Run returns.
func (s *Server) Run(ctx context.Context) error {
	if s.cfg.DisableHTTPS {
		logger.Info("auth-daemon running (HTTPS disabled)")
		<-ctx.Done()
		s.cleanupPrincipalsFile()
		return nil
	}
	tcfg := &tls.Config{
		Certificates: []tls.Certificate{s.tlsCert},
		MinVersion:   tls.VersionTLS12,
	}
	// Every route goes through the preshared-key check.
	handler := s.authMiddleware(s.mux)
	srv := &http.Server{
		Addr:              s.addr,
		Handler:           handler,
		TLSConfig:         tcfg,
		ReadTimeout:       10 * time.Second,
		WriteTimeout:      10 * time.Second,
		ReadHeaderTimeout: 5 * time.Second,
		IdleTimeout:       60 * time.Second,
	}
	// On context cancellation, shut down gracefully with a 10s budget;
	// Shutdown makes ListenAndServeTLS below return http.ErrServerClosed.
	go func() {
		<-ctx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		if err := srv.Shutdown(shutdownCtx); err != nil {
			logger.Warn("auth-daemon shutdown: %v", err)
		}
	}()
	// NOTE(review): s.addr is ":<port>", which binds all interfaces, while this
	// log line suggests loopback only — confirm the intended exposure.
	logger.Info("auth-daemon listening on https://127.0.0.1%s", s.addr)
	// Empty cert/key file arguments: the certificate comes from TLSConfig.
	if err := srv.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed {
		return err
	}
	s.cleanupPrincipalsFile()
	return nil
}
|
||||
|
||||
func (s *Server) cleanupPrincipalsFile() {
|
||||
if s.cfg.PrincipalsFilePath != "" {
|
||||
if err := os.Remove(s.cfg.PrincipalsFilePath); err != nil && !os.IsNotExist(err) {
|
||||
logger.Warn("auth-daemon: remove principals file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,7 +4,7 @@ services:
|
||||
container_name: newt
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- PANGOLIN_ENDPOINT=https://example.com
|
||||
- PANGOLIN_ENDPOINT=https://app.pangolin.net
|
||||
- NEWT_ID=2ix2t8xk22ubpfy
|
||||
- NEWT_SECRET=nnisrfsdfc7prqsp9ewo1dvtvci50j5uiqotez00dgap0ii2
|
||||
- LOG_LEVEL=DEBUG
|
||||
321
main.go
321
main.go
@@ -1,7 +1,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
@@ -11,12 +13,12 @@ import (
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/authdaemon"
|
||||
"github.com/fosrl/newt/docker"
|
||||
"github.com/fosrl/newt/healthcheck"
|
||||
"github.com/fosrl/newt/logger"
|
||||
@@ -58,10 +60,6 @@ type ExitNodeData struct {
|
||||
ExitNodes []ExitNode `json:"exitNodes"`
|
||||
}
|
||||
|
||||
type SSHPublicKeyData struct {
|
||||
PublicKey string `json:"publicKey"`
|
||||
}
|
||||
|
||||
// ExitNode represents an exit node with an ID, endpoint, and weight.
|
||||
type ExitNode struct {
|
||||
ID int `json:"exitNodeId"`
|
||||
@@ -134,6 +132,10 @@ var (
|
||||
preferEndpoint string
|
||||
healthMonitor *healthcheck.Monitor
|
||||
enforceHealthcheckCert bool
|
||||
authDaemonKey string
|
||||
authDaemonPrincipalsFile string
|
||||
authDaemonCACertPath string
|
||||
authDaemonEnabled bool
|
||||
// Build/version (can be overridden via -ldflags "-X main.newtVersion=...")
|
||||
newtVersion = "version_replaceme"
|
||||
|
||||
@@ -156,6 +158,28 @@ var (
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Check for subcommands first (only principals exits early)
|
||||
if len(os.Args) > 1 {
|
||||
switch os.Args[1] {
|
||||
case "auth-daemon":
|
||||
// Run principals subcommand only if the next argument is "principals"
|
||||
if len(os.Args) > 2 && os.Args[2] == "principals" {
|
||||
runPrincipalsCmd(os.Args[3:])
|
||||
return
|
||||
}
|
||||
|
||||
// auth-daemon subcommand without "principals" - show help
|
||||
fmt.Println("Error: auth-daemon subcommand requires 'principals' argument")
|
||||
fmt.Println()
|
||||
fmt.Println("Usage:")
|
||||
fmt.Println(" newt auth-daemon principals [options]")
|
||||
fmt.Println()
|
||||
|
||||
// If not "principals", exit the switch to continue with normal execution
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we're running as a Windows service
|
||||
if isWindowsService() {
|
||||
runService("NewtWireguardService", false, os.Args[1:])
|
||||
@@ -187,6 +211,10 @@ func runNewtMain(ctx context.Context) {
|
||||
updownScript = os.Getenv("UPDOWN_SCRIPT")
|
||||
interfaceName = os.Getenv("INTERFACE")
|
||||
portStr := os.Getenv("PORT")
|
||||
authDaemonKey = os.Getenv("AD_KEY")
|
||||
authDaemonPrincipalsFile = os.Getenv("AD_PRINCIPALS_FILE")
|
||||
authDaemonCACertPath = os.Getenv("AD_CA_CERT_PATH")
|
||||
authDaemonEnabledEnv := os.Getenv("AUTH_DAEMON_ENABLED")
|
||||
|
||||
// Metrics/observability env mirrors
|
||||
metricsEnabledEnv := os.Getenv("NEWT_METRICS_PROMETHEUS_ENABLED")
|
||||
@@ -279,10 +307,6 @@ func runNewtMain(ctx context.Context) {
|
||||
// load the prefer endpoint just as a flag
|
||||
flag.StringVar(&preferEndpoint, "prefer-endpoint", "", "Prefer this endpoint for the connection (if set, will override the endpoint from the server)")
|
||||
|
||||
// if authorizedKeysFile == "" {
|
||||
// flag.StringVar(&authorizedKeysFile, "authorized-keys-file", "~/.ssh/authorized_keys", "Path to authorized keys file (if unset, no keys will be authorized)")
|
||||
// }
|
||||
|
||||
// Add new mTLS flags
|
||||
if tlsClientCert == "" {
|
||||
flag.StringVar(&tlsClientCert, "tls-client-cert-file", "", "Path to client certificate file (PEM/DER format)")
|
||||
@@ -379,6 +403,24 @@ func runNewtMain(ctx context.Context) {
|
||||
region = regionEnv
|
||||
}
|
||||
|
||||
// Auth daemon flags
|
||||
if authDaemonKey == "" {
|
||||
flag.StringVar(&authDaemonKey, "ad-pre-shared-key", "", "Pre-shared key for auth daemon authentication")
|
||||
}
|
||||
if authDaemonPrincipalsFile == "" {
|
||||
flag.StringVar(&authDaemonPrincipalsFile, "ad-principals-file", "/var/run/auth-daemon/principals", "Path to the principals file for auth daemon")
|
||||
}
|
||||
if authDaemonCACertPath == "" {
|
||||
flag.StringVar(&authDaemonCACertPath, "ad-ca-cert-path", "/etc/ssh/ca.pem", "Path to the CA certificate file for auth daemon")
|
||||
}
|
||||
if authDaemonEnabledEnv == "" {
|
||||
flag.BoolVar(&authDaemonEnabled, "auth-daemon", false, "Enable auth daemon mode (runs alongside normal newt operation)")
|
||||
} else {
|
||||
if v, err := strconv.ParseBool(authDaemonEnabledEnv); err == nil {
|
||||
authDaemonEnabled = v
|
||||
}
|
||||
}
|
||||
|
||||
// do a --version check
|
||||
version := flag.Bool("version", false, "Print the version")
|
||||
|
||||
@@ -398,6 +440,13 @@ func runNewtMain(ctx context.Context) {
|
||||
|
||||
logger.Init(nil)
|
||||
loggerLevel := util.ParseLogLevel(logLevel)
|
||||
|
||||
// Start auth daemon if enabled
|
||||
if authDaemonEnabled {
|
||||
if err := startAuthDaemon(ctx); err != nil {
|
||||
logger.Fatal("Failed to start auth daemon: %v", err)
|
||||
}
|
||||
}
|
||||
logger.GetLogger().SetLevel(loggerLevel)
|
||||
|
||||
// Initialize telemetry after flags are parsed (so flags override env)
|
||||
@@ -694,8 +743,8 @@ func runNewtMain(ctx context.Context) {
|
||||
|
||||
relayPort := wgData.RelayPort
|
||||
if relayPort == 0 {
|
||||
relayPort = 21820
|
||||
}
|
||||
relayPort = 21820
|
||||
}
|
||||
|
||||
clientsHandleNewtConnection(wgData.PublicKey, endpoint, relayPort)
|
||||
|
||||
@@ -1168,94 +1217,6 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
}
|
||||
})
|
||||
|
||||
// EXPERIMENTAL: WHAT SHOULD WE DO ABOUT SECURITY?
|
||||
client.RegisterHandler("newt/send/ssh/publicKey", func(msg websocket.WSMessage) {
|
||||
logger.Debug("Received SSH public key request")
|
||||
|
||||
var sshPublicKeyData SSHPublicKeyData
|
||||
|
||||
jsonData, err := json.Marshal(msg.Data)
|
||||
if err != nil {
|
||||
logger.Info(fmtErrMarshaling, err)
|
||||
return
|
||||
}
|
||||
if err := json.Unmarshal(jsonData, &sshPublicKeyData); err != nil {
|
||||
logger.Info("Error unmarshaling SSH public key data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
sshPublicKey := sshPublicKeyData.PublicKey
|
||||
|
||||
if authorizedKeysFile == "" {
|
||||
logger.Debug("No authorized keys file set, skipping public key response")
|
||||
return
|
||||
}
|
||||
|
||||
// Expand tilde to home directory if present
|
||||
expandedPath := authorizedKeysFile
|
||||
if strings.HasPrefix(authorizedKeysFile, "~/") {
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
logger.Error("Failed to get user home directory: %v", err)
|
||||
return
|
||||
}
|
||||
expandedPath = filepath.Join(homeDir, authorizedKeysFile[2:])
|
||||
}
|
||||
|
||||
// if it is set but the file does not exist, create it
|
||||
if _, err := os.Stat(expandedPath); os.IsNotExist(err) {
|
||||
logger.Debug("Authorized keys file does not exist, creating it: %s", expandedPath)
|
||||
if err := os.MkdirAll(filepath.Dir(expandedPath), 0755); err != nil {
|
||||
logger.Error("Failed to create directory for authorized keys file: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err := os.Create(expandedPath); err != nil {
|
||||
logger.Error("Failed to create authorized keys file: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the public key already exists in the file
|
||||
fileContent, err := os.ReadFile(expandedPath)
|
||||
if err != nil {
|
||||
logger.Error("Failed to read authorized keys file: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the key already exists (trim whitespace for comparison)
|
||||
existingKeys := strings.Split(string(fileContent), "\n")
|
||||
keyAlreadyExists := false
|
||||
trimmedNewKey := strings.TrimSpace(sshPublicKey)
|
||||
|
||||
for _, existingKey := range existingKeys {
|
||||
if strings.TrimSpace(existingKey) == trimmedNewKey && trimmedNewKey != "" {
|
||||
keyAlreadyExists = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if keyAlreadyExists {
|
||||
logger.Info("SSH public key already exists in authorized keys file, skipping")
|
||||
return
|
||||
}
|
||||
|
||||
// append the public key to the authorized keys file
|
||||
logger.Debug("Appending public key to authorized keys file: %s", sshPublicKey)
|
||||
file, err := os.OpenFile(expandedPath, os.O_APPEND|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
logger.Error("Failed to open authorized keys file: %v", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err := file.WriteString(sshPublicKey + "\n"); err != nil {
|
||||
logger.Error("Failed to write public key to authorized keys file: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("SSH public key appended to authorized keys file")
|
||||
})
|
||||
|
||||
// Register handler for adding health check targets
|
||||
client.RegisterHandler("newt/healthcheck/add", func(msg websocket.WSMessage) {
|
||||
logger.Debug("Received health check add request: %+v", msg)
|
||||
@@ -1411,6 +1372,168 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
||||
}
|
||||
})
|
||||
|
||||
// Register handler for SSH certificate issued events
|
||||
client.RegisterHandler("newt/pam/connection", func(msg websocket.WSMessage) {
|
||||
logger.Debug("Received SSH certificate issued message")
|
||||
|
||||
// Define the structure of the incoming message
|
||||
type SSHCertData struct {
|
||||
MessageId int `json:"messageId"`
|
||||
AgentPort int `json:"agentPort"`
|
||||
AgentHost string `json:"agentHost"`
|
||||
CACert string `json:"caCert"`
|
||||
Username string `json:"username"`
|
||||
NiceID string `json:"niceId"`
|
||||
Metadata struct {
|
||||
Sudo bool `json:"sudo"`
|
||||
Homedir bool `json:"homedir"`
|
||||
} `json:"metadata"`
|
||||
}
|
||||
|
||||
var certData SSHCertData
|
||||
jsonData, err := json.Marshal(msg.Data)
|
||||
if err != nil {
|
||||
logger.Error("Error marshaling SSH cert data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// print the received data for debugging
|
||||
logger.Debug("Received SSH cert data: %s", string(jsonData))
|
||||
|
||||
if err := json.Unmarshal(jsonData, &certData); err != nil {
|
||||
logger.Error("Error unmarshaling SSH cert data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if we're running the auth daemon internally
|
||||
if authDaemonServer != nil {
|
||||
// Call ProcessConnection directly when running internally
|
||||
logger.Debug("Calling internal auth daemon ProcessConnection for user %s", certData.Username)
|
||||
|
||||
authDaemonServer.ProcessConnection(authdaemon.ConnectionRequest{
|
||||
CaCert: certData.CACert,
|
||||
NiceId: certData.NiceID,
|
||||
Username: certData.Username,
|
||||
Metadata: authdaemon.ConnectionMetadata{
|
||||
Sudo: certData.Metadata.Sudo,
|
||||
Homedir: certData.Metadata.Homedir,
|
||||
},
|
||||
})
|
||||
|
||||
// Send success response back to cloud
|
||||
err = client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
})
|
||||
|
||||
logger.Info("Successfully processed connection via internal auth daemon for user %s", certData.Username)
|
||||
} else {
|
||||
// External auth daemon mode - make HTTP request
|
||||
// Check if auth daemon key is configured
|
||||
if authDaemonKey == "" {
|
||||
logger.Error("Auth daemon key not configured, cannot communicate with daemon")
|
||||
// Send failure response back to cloud
|
||||
err := client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
"error": "auth daemon key not configured",
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Failed to send SSH cert failure response: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare the request body for the auth daemon
|
||||
requestBody := map[string]interface{}{
|
||||
"caCert": certData.CACert,
|
||||
"niceId": certData.NiceID,
|
||||
"username": certData.Username,
|
||||
"metadata": map[string]interface{}{
|
||||
"sudo": certData.Metadata.Sudo,
|
||||
"homedir": certData.Metadata.Homedir,
|
||||
},
|
||||
}
|
||||
|
||||
requestJSON, err := json.Marshal(requestBody)
|
||||
if err != nil {
|
||||
logger.Error("Failed to marshal auth daemon request: %v", err)
|
||||
// Send failure response
|
||||
client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
"error": fmt.Sprintf("failed to marshal request: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Create HTTPS client that skips certificate verification
|
||||
// (auth daemon uses self-signed cert)
|
||||
httpClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
// Make the request to the auth daemon
|
||||
url := fmt.Sprintf("https://%s:%d/connection", certData.AgentHost, certData.AgentPort)
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(requestJSON))
|
||||
if err != nil {
|
||||
logger.Error("Failed to create auth daemon request: %v", err)
|
||||
client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
"error": fmt.Sprintf("failed to create request: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Set headers
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", "Bearer "+authDaemonKey)
|
||||
|
||||
logger.Debug("Sending SSH cert to auth daemon at %s", url)
|
||||
|
||||
// Send the request
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
logger.Error("Failed to connect to auth daemon: %v", err)
|
||||
client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
"error": fmt.Sprintf("failed to connect to auth daemon: %v", err),
|
||||
})
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Check response status
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
logger.Error("Auth daemon returned non-OK status: %d", resp.StatusCode)
|
||||
client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
"error": fmt.Sprintf("auth daemon returned status %d", resp.StatusCode),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("Successfully registered SSH certificate with external auth daemon for user %s", certData.Username)
|
||||
}
|
||||
|
||||
// Send success response back to cloud
|
||||
err = client.SendMessage("ws/round-trip/complete", map[string]interface{}{
|
||||
"messageId": certData.MessageId,
|
||||
"complete": true,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("Failed to send SSH cert success response: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
client.OnConnect(func() error {
|
||||
publicKey = privateKey.PublicKey()
|
||||
logger.Debug("Public key: %s", publicKey)
|
||||
|
||||
22
scripts/append-release-notes.sh
Normal file
22
scripts/append-release-notes.sh
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/usr/bin/env bash
# Append container-image details to an existing GitHub release's notes,
# keeping the current body intact and re-marking the release as a draft.
#
# Required env:
#   TAG      - release tag to edit (e.g. "1.2.3")
#   GHCR_REF - fully-qualified GHCR image reference
#   DIGEST   - image manifest digest
# Optional env:
#   DH_REF   - Docker Hub image reference (rendered as "N/A" when unset)
set -euo pipefail

: "${TAG:?}"
: "${GHCR_REF:?}"
: "${DIGEST:?}"

NOTES_FILE="$(mktemp)"
# Clean up the temp file even when a gh call below fails; with `set -e` a
# trailing `rm` would be skipped on error and leak the file.
trap 'rm -f "${NOTES_FILE}"' EXIT

# Fetch the existing body so the image section is appended, not replacing it.
existing_body="$(gh release view "${TAG}" --json body --jq '.body')"
cat > "${NOTES_FILE}" <<EOF
${existing_body}

## Container Images
- GHCR: \`${GHCR_REF}\`
- Docker Hub: \`${DH_REF:-N/A}\`
**Digest:** \`${DIGEST}\`
EOF

gh release edit "${TAG}" --draft --notes-file "${NOTES_FILE}"
||||
11
scripts/nfpm.yaml.tmpl
Normal file
11
scripts/nfpm.yaml.tmpl
Normal file
@@ -0,0 +1,11 @@
|
||||
# nfpm package manifest template for newt.
# The __PLACEHOLDER__ tokens are presumably substituted by the release
# pipeline before nfpm runs — TODO confirm against the packaging workflow.
name: __PKG_NAME__  # package name, e.g. "newt"
arch: __ARCH__  # target architecture, e.g. amd64 or arm64
platform: linux
version: __VERSION__  # release version string
section: net
priority: optional
maintainer: fosrl
description: Newt - userspace tunnel client and TCP/UDP proxy
contents:
  # Install the compiled binary onto the target system.
  - src: build/newt
    dst: /usr/bin/newt
|
||||
149
scripts/publish-apt.sh
Normal file
149
scripts/publish-apt.sh
Normal file
@@ -0,0 +1,149 @@
|
||||
#!/usr/bin/env bash
# Build and publish a signed APT repository of newt .deb release assets.
#
# Flow: download .deb assets from GitHub releases, merge them into the
# existing pool synced from S3, regenerate Packages/Release metadata,
# GPG-sign it, upload the repo back to S3, and invalidate CloudFront.
#
# Required env:
#   GH_REPO, S3_BUCKET, AWS_REGION, CLOUDFRONT_DISTRIBUTION_ID,
#   PKG_NAME, SUITE, COMPONENT, APT_GPG_PRIVATE_KEY
# Optional env:
#   S3_PREFIX, BACKFILL_ALL, INPUT_TAG, EVENT_TAG, PUSH_TAG,
#   APT_GPG_PASSPHRASE, REPO_BASE_URL
set -euo pipefail

# ---- required env ----
: "${GH_REPO:?}"
: "${S3_BUCKET:?}"
: "${AWS_REGION:?}"
: "${CLOUDFRONT_DISTRIBUTION_ID:?}"
: "${PKG_NAME:?}"
: "${SUITE:?}"
: "${COMPONENT:?}"
: "${APT_GPG_PRIVATE_KEY:?}"

# Optional key prefix inside the bucket; normalize to a trailing slash so
# "s3://bucket/${S3_PREFIX}apt/" is well-formed whether set or empty.
S3_PREFIX="${S3_PREFIX:-}"
if [[ -n "${S3_PREFIX}" && "${S3_PREFIX}" != */ ]]; then
  S3_PREFIX="${S3_PREFIX}/"
fi

WORKDIR="$(pwd)"
mkdir -p repo/apt assets build

# download_asset TAG PATTERN
# Download a release asset into assets/, retrying because assets may still
# be uploading when this job starts. Returns nonzero after all attempts.
download_asset() {
  local tag="$1"
  local pattern="$2"
  local attempts=12

  for attempt in $(seq 1 "${attempts}"); do
    if gh release download "${tag}" -R "${GH_REPO}" -p "${pattern}" -D assets; then
      return 0
    fi
    echo "Asset ${pattern} not available yet (attempt ${attempt}/${attempts}); retrying..."
    sleep 5
  done

  echo "ERROR: Failed to download asset ${pattern} for ${tag} after ${attempts} attempts"
  return 1
}

# Import the signing key; tolerate "already imported" so reruns succeed.
echo "${APT_GPG_PRIVATE_KEY}" | gpg --batch --import >/dev/null 2>&1 || true

# First secret key's long key ID (colon-format field 5 of the "sec" record).
KEYID="$(gpg --list-secret-keys --with-colons | awk -F: '$1=="sec"{print $5; exit}')"
if [[ -z "${KEYID}" ]]; then
  echo "ERROR: No GPG secret key available after import."
  exit 1
fi

# Determine which tags to process: backfill everything, or a single tag
# taken from the first provided source (workflow input, event, push).
TAGS=""
if [[ "${BACKFILL_ALL:-false}" == "true" ]]; then
  echo "Backfill mode: collecting all release tags..."
  TAGS="$(gh release list -R "${GH_REPO}" --limit 200 --json tagName --jq '.[].tagName')"
else
  if [[ -n "${INPUT_TAG:-}" ]]; then
    TAGS="${INPUT_TAG}"
  elif [[ -n "${EVENT_TAG:-}" ]]; then
    TAGS="${EVENT_TAG}"
  elif [[ -n "${PUSH_TAG:-}" ]]; then
    TAGS="${PUSH_TAG}"
  else
    echo "No tag provided; using latest release tag..."
    TAGS="$(gh release view -R "${GH_REPO}" --json tagName --jq '.tagName')"
  fi
fi

echo "Tags to process:"
printf '%s\n' "${TAGS}"

# Pull existing repo from S3 so we keep older versions in the pool.
echo "Sync existing repo from S3..."
aws s3 sync "s3://${S3_BUCKET}/${S3_PREFIX}apt/" repo/apt/ >/dev/null 2>&1 || true

# Download each tag's .deb assets and copy them into the Debian pool layout:
# pool/<component>/<first letter of pkg>/<pkg>/
while IFS= read -r TAG; do
  [[ -z "${TAG}" ]] && continue
  echo "=== Processing tag: ${TAG} ==="

  rm -rf assets build
  mkdir -p assets build

  deb_amd64="${PKG_NAME}_${TAG}_amd64.deb"
  deb_arm64="${PKG_NAME}_${TAG}_arm64.deb"

  download_asset "${TAG}" "${deb_amd64}"
  download_asset "${TAG}" "${deb_arm64}"

  # Belt-and-braces: gh can exit 0 without matching the pattern.
  if [[ ! -f "assets/${deb_amd64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_amd64}"
    exit 1
  fi
  if [[ ! -f "assets/${deb_arm64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_arm64}"
    exit 1
  fi

  mkdir -p "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
  cp -v assets/*.deb "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"

done <<< "${TAGS}"

# Regenerate per-architecture Packages indexes over the whole pool.
cd repo/apt

for arch in amd64 arm64; do
  mkdir -p "dists/${SUITE}/${COMPONENT}/binary-${arch}"
  dpkg-scanpackages -a "${arch}" pool > "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
  gzip -fk "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
done

# Release file with checksums of the Packages indexes.
cat > apt-ftparchive.conf <<EOF
APT::FTPArchive::Release::Origin "fosrl";
APT::FTPArchive::Release::Label "newt";
APT::FTPArchive::Release::Suite "${SUITE}";
APT::FTPArchive::Release::Codename "${SUITE}";
APT::FTPArchive::Release::Architectures "amd64 arm64";
APT::FTPArchive::Release::Components "${COMPONENT}";
APT::FTPArchive::Release::Description "Newt APT repository";
EOF

apt-ftparchive -c apt-ftparchive.conf release "dists/${SUITE}" > "dists/${SUITE}/Release"

# Sign Release: InRelease (clearsigned) and Release.gpg (detached).
cd "dists/${SUITE}"

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  --clearsign -o InRelease Release

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  -abs -o Release.gpg Release

# Export public key into apt repo root so clients can fetch it.
cd ../../..
gpg --batch --yes --armor --export "${KEYID}" > "${WORKDIR}/repo/apt/public.key"

# Upload to S3 (--delete drops files removed from the local repo).
echo "Uploading to S3..."
aws s3 sync "${WORKDIR}/repo/apt" "s3://${S3_BUCKET}/${S3_PREFIX}apt/" --delete

# Invalidate only the metadata paths; pool objects are content-addressed
# by filename and never change in place.
echo "CloudFront invalidation..."
aws cloudfront create-invalidation \
  --distribution-id "${CLOUDFRONT_DISTRIBUTION_ID}" \
  --paths "/${S3_PREFIX}apt/dists/*" "/${S3_PREFIX}apt/public.key"

# REPO_BASE_URL is informational and not in the required-env list above;
# default it so `set -u` doesn't abort with "unbound variable" here and
# turn an otherwise-successful publish into a failed job.
echo "Done. Repo base: ${REPO_BASE_URL:-}"
|
||||
Reference in New Issue
Block a user