Compare commits

..

1 Commits

Author SHA1 Message Date
sijie.sun
6f1ecd147b bump version to v2.2.3 2025-03-17 21:37:00 +08:00
155 changed files with 2156 additions and 8958 deletions

View File

@@ -6,84 +6,72 @@ rustflags = ["-C", "linker-flavor=ld.lld"]
linker = "aarch64-linux-gnu-gcc"
[target.aarch64-unknown-linux-musl]
linker = "aarch64-unknown-linux-musl-gcc"
linker = "aarch64-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "target-feature=+crt-static"]
[target.mipsel-unknown-linux-musl]
linker = "mipsel-unknown-linux-muslsf-gcc"
linker = "mipsel-linux-muslsf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/mipsel-unknown-linux-muslsf/mipsel-unknown-linux-muslsf/lib",
"./musl_gcc/mipsel-linux-muslsf-cross/mipsel-linux-muslsf/lib",
"-L",
"./musl_gcc/mipsel-unknown-linux-muslsf/mipsel-unknown-linux-muslsf/sysroot/usr/lib",
"-L",
"./musl_gcc/mipsel-unknown-linux-muslsf/lib/gcc/mipsel-unknown-linux-muslsf/15.1.0",
"./musl_gcc/mipsel-linux-muslsf-cross/lib/gcc/mipsel-linux-muslsf/11.2.1",
"-l",
"atomic",
"-l",
"ctz",
"-l",
"gcc",
]
[target.mips-unknown-linux-musl]
linker = "mips-unknown-linux-muslsf-gcc"
linker = "mips-linux-muslsf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/mips-unknown-linux-muslsf/mips-unknown-linux-muslsf/lib",
"./musl_gcc/mips-linux-muslsf-cross/mips-linux-muslsf/lib",
"-L",
"./musl_gcc/mips-unknown-linux-muslsf/mips-unknown-linux-muslsf/sysroot/usr/lib",
"-L",
"./musl_gcc/mips-unknown-linux-muslsf/lib/gcc/mips-unknown-linux-muslsf/15.1.0",
"./musl_gcc/mips-linux-muslsf-cross/lib/gcc/mips-linux-muslsf/11.2.1",
"-l",
"atomic",
"-l",
"ctz",
"-l",
"gcc",
]
[target.armv7-unknown-linux-musleabihf]
linker = "armv7-unknown-linux-musleabihf-gcc"
linker = "armv7l-linux-musleabihf-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.armv7-unknown-linux-musleabi]
linker = "armv7-unknown-linux-musleabi-gcc"
linker = "armv7m-linux-musleabi-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.arm-unknown-linux-musleabihf]
linker = "arm-unknown-linux-musleabihf-gcc"
linker = "arm-linux-musleabihf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/arm-unknown-linux-musleabihf/arm-unknown-linux-musleabihf/lib",
"./musl_gcc/arm-linux-musleabihf-cross/arm-linux-musleabihf/lib",
"-L",
"./musl_gcc/arm-unknown-linux-musleabihf/lib/gcc/arm-unknown-linux-musleabihf/15.1.0",
"./musl_gcc/arm-linux-musleabihf-cross/lib/gcc/arm-linux-musleabihf/11.2.1",
"-l",
"atomic",
"-l",
"gcc",
]
[target.arm-unknown-linux-musleabi]
linker = "arm-unknown-linux-musleabi-gcc"
linker = "arm-linux-musleabi-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/arm-unknown-linux-musleabi/arm-unknown-linux-musleabi/lib",
"./musl_gcc/arm-linux-musleabi-cross/arm-linux-musleabi/lib",
"-L",
"./musl_gcc/arm-unknown-linux-musleabi/lib/gcc/arm-unknown-linux-musleabi/15.1.0",
"./musl_gcc/arm-linux-musleabi-cross/lib/gcc/arm-linux-musleabi/11.2.1",
"-l",
"atomic",
"-l",
"gcc",
]

View File

@@ -31,47 +31,6 @@ jobs:
skip_after_successful_duplicate: 'true'
cancel_others: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/workflows/install_rust.sh"]'
build_web:
runs-on: ubuntu-latest
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v4
with:
node-version: 21
- name: Install pnpm
uses: pnpm/action-setup@v3
with:
version: 9
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r --filter "./easytier-web/*" build
- name: Archive artifact
uses: actions/upload-artifact@v4
with:
name: easytier-web-dashboard
path: |
easytier-web/frontend/dist/*
build:
strategy:
fail-fast: false
@@ -112,12 +71,10 @@ jobs:
- TARGET: x86_64-pc-windows-msvc
OS: windows-latest
ARTIFACT_NAME: windows-x86_64
- TARGET: aarch64-pc-windows-msvc
OS: windows-latest
ARTIFACT_NAME: windows-arm64
- TARGET: i686-pc-windows-msvc
OS: windows-latest
ARTIFACT_NAME: windows-i686
- TARGET: x86_64-unknown-freebsd
OS: ubuntu-22.04
@@ -130,9 +87,7 @@ jobs:
TARGET: ${{ matrix.TARGET }}
OS: ${{ matrix.OS }}
OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }}
needs:
- pre_job
- build_web
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
@@ -141,12 +96,6 @@ jobs:
run: |
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV
- name: Download web artifact
uses: actions/download-artifact@v4
with:
name: easytier-web-dashboard
path: easytier-web/frontend/dist/
- name: Cargo cache
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
uses: actions/cache@v4
@@ -166,38 +115,29 @@ jobs:
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
run: |
bash ./.github/workflows/install_rust.sh
# we set the sysroot when sysroot is a dir
# this dir is a soft link generated by install_rust.sh
# kcp-sys need this to gen ffi bindings. without this clang may fail to find some libc headers such as bits/libc-header-start.h
if [[ -d "./musl_gcc/sysroot" ]]; then
export BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot)
fi
export KCP_SYS_EXTRA_HEADER_PATH=/usr/include/musl-cross
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier
else
if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe
fi
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
cargo build --release --verbose --target $TARGET
fi
# Copied and slightly modified from @lmq8267 (https://github.com/lmq8267)
- name: Build Core & Cli (X86_64 FreeBSD)
uses: vmactions/freebsd-vm@v1
uses: cross-platform-actions/action@v0.23.0
if: ${{ endsWith(matrix.TARGET, 'freebsd') }}
env:
TARGET: ${{ matrix.TARGET }}
with:
envs: TARGET
release: ${{ matrix.BSD_VERSION }}
arch: x86_64
usesh: true
mem: 6144
cpu: 4
operating_system: freebsd
environment_variables: TARGET
architecture: x86-64
version: ${{ matrix.BSD_VERSION }}
shell: bash
memory: 5G
cpu_count: 4
run: |
uname -a
echo $SHELL
@@ -206,36 +146,40 @@ jobs:
whoami
env | sort
pkg install -y git protobuf llvm-devel sudo curl
sudo pkg install -y git protobuf llvm-devel
curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
. $HOME/.cargo/env
source $HOME/.cargo/env
rustup set auto-self-update disable
rustup install 1.86
rustup default 1.86
rustup install 1.84
rustup default 1.84
export CC=clang
export CXX=clang++
export CARGO_TERM_COLOR=always
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed
cargo build --release --verbose --target $TARGET
- name: Install UPX
if: ${{ matrix.OS != 'macos-latest' }}
uses: crazy-max/ghaction-upx@v3
with:
version: latest
install-only: true
- name: Compress
run: |
mkdir -p ./artifacts/objects/
# windows is the only OS using a different convention for executable file name
if [[ $OS =~ ^windows.*$ && $TARGET =~ ^x86_64.*$ ]]; then
SUFFIX=.exe
cp easytier/third_party/*.dll ./artifacts/objects/
elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^i686.*$ ]]; then
SUFFIX=.exe
cp easytier/third_party/i686/*.dll ./artifacts/objects/
cp easytier/third_party/Packet.dll ./artifacts/objects/
cp easytier/third_party/wintun.dll ./artifacts/objects/
elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^aarch64.*$ ]]; then
SUFFIX=.exe
cp easytier/third_party/arm64/*.dll ./artifacts/objects/
cp easytier/third_party/arm64/Packet.dll ./artifacts/objects/
cp easytier/third_party/arm64/wintun.dll ./artifacts/objects/
fi
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
TAG=$GITHUB_REF_NAME
@@ -244,18 +188,14 @@ jobs:
fi
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ ]]; then
UPX_VERSION=5.0.1
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf -
cp upx-${UPX_VERSION}-amd64_linux/upx .
./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
./upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX"
upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX"
fi
mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/
mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/
if [[ ! $TARGET =~ ^mips.*$ ]]; then
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./artifacts/objects/
mv ./target/$TARGET/release/easytier-web-embed"$SUFFIX" ./artifacts/objects/
fi
mv ./artifacts/objects/* ./artifacts/
@@ -273,47 +213,8 @@ jobs:
runs-on: ubuntu-latest
needs:
- pre_job
- build_web
- build
steps:
- name: Mark result as failed
if: needs.build.result != 'success'
run: exit 1
magisk_build:
needs:
- pre_job
- build_web
- build
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v4 # 必须先检出代码才能获取模块配置
# 下载二进制文件到独立目录
- name: Download Linux aarch64 binaries
uses: actions/download-artifact@v4
with:
name: easytier-linux-aarch64
path: ./downloaded-binaries/ # 独立目录避免冲突
# 将二进制文件复制到 Magisk 模块目录
- name: Prepare binaries
run: |
mkdir -p ./easytier-contrib/easytier-magisk/
cp ./downloaded-binaries/easytier-core ./easytier-contrib/easytier-magisk/
cp ./downloaded-binaries/easytier-cli ./easytier-contrib/easytier-magisk/
cp ./downloaded-binaries/easytier-web ./easytier-contrib/easytier-magisk/
# 上传生成的模块
- name: Upload Magisk Module
uses: actions/upload-artifact@v4
with:
name: Easytier-Magisk
path: |
./easytier-contrib/easytier-magisk
!./easytier-contrib/easytier-magisk/build.sh
!./easytier-contrib/easytier-magisk/magisk_update.json
if-no-files-found: error

View File

@@ -11,7 +11,7 @@ on:
image_tag:
description: 'Tag for this image build'
type: string
default: 'v2.3.1'
default: 'v2.2.3'
required: true
mark_latest:
description: 'Mark this image as latest'

View File

@@ -63,11 +63,6 @@ jobs:
GUI_TARGET: aarch64-pc-windows-msvc
ARTIFACT_NAME: windows-arm64
- TARGET: i686-pc-windows-msvc
OS: windows-latest
GUI_TARGET: i686-pc-windows-msvc
ARTIFACT_NAME: windows-i686
runs-on: ${{ matrix.OS }}
env:
NAME: easytier
@@ -78,56 +73,6 @@ jobs:
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
run: |
sudo apt update
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
- name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
run: |
# see https://tauri.app/v1/guides/building/linux/
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
sudo dpkg --add-architecture arm64
sudo apt update
sudo apt install aptitude
sudo aptitude install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64 \
libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64 \
libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
- uses: actions/checkout@v3
- name: Set current ref as env variable
@@ -179,13 +124,59 @@ jobs:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install GUI dependencies (x86 only)
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
run: |
sudo apt install -qq libwebkit2gtk-4.1-dev \
build-essential \
curl \
wget \
file \
libgtk-3-dev \
librsvg2-dev \
libxdo-dev \
libssl-dev \
patchelf
- name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
run: |
# see https://tauri.app/v1/guides/building/linux/
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
sudo dpkg --add-architecture arm64
sudo apt-get update
sudo apt-get install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64
sudo apt-get install -y libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64
sudo apt install -f -o Dpkg::Options::="--force-overwrite" libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
- name: copy correct DLLs
if: ${{ matrix.OS == 'windows-latest' }}
run: |
if [[ $GUI_TARGET =~ ^aarch64.*$ ]]; then
cp ./easytier/third_party/arm64/*.dll ./easytier-gui/src-tauri/
elif [[ $GUI_TARGET =~ ^i686.*$ ]]; then
cp ./easytier/third_party/i686/*.dll ./easytier-gui/src-tauri/
else
cp ./easytier/third_party/*.dll ./easytier-gui/src-tauri/
fi

View File

@@ -8,33 +8,49 @@
# dependencies are only needed on ubuntu as that's the only place where
# we make cross-compilation
if [[ $OS =~ ^ubuntu.*$ ]]; then
sudo apt-get update && sudo apt-get install -qq musl-tools libappindicator3-dev llvm clang
# https://github.com/cross-tools/musl-cross/releases
# if "musl" is a substring of TARGET, we assume that we are using musl
MUSL_TARGET=$TARGET
# if target is mips or mipsel, we should use soft-float version of musl
if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then
MUSL_TARGET=${TARGET}sf
fi
if [[ $MUSL_TARGET =~ musl ]]; then
sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools libappindicator3-dev llvm clang
# curl -s musl.cc | grep mipsel
case $TARGET in
mipsel-unknown-linux-musl)
MUSL_URI=mipsel-linux-muslsf
;;
mips-unknown-linux-musl)
MUSL_URI=mips-linux-muslsf
;;
aarch64-unknown-linux-musl)
MUSL_URI=aarch64-linux-musl
;;
armv7-unknown-linux-musleabihf)
MUSL_URI=armv7l-linux-musleabihf
;;
armv7-unknown-linux-musleabi)
MUSL_URI=armv7m-linux-musleabi
;;
arm-unknown-linux-musleabihf)
MUSL_URI=arm-linux-musleabihf
;;
arm-unknown-linux-musleabi)
MUSL_URI=arm-linux-musleabi
;;
esac
if [ -n "$MUSL_URI" ]; then
mkdir -p ./musl_gcc
wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/
tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot
sudo chmod -R a+rwx ./musl_gcc
wget -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/
tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/${MUSL_URI}/include/ /usr/include/musl-cross
fi
fi
# see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable
rustup install 1.86
rustup default 1.86
rustup install 1.84
rustup default 1.84
# mips/mipsel cannot add target from rustup, need compile by ourselves
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255
cd "$PWD/musl_gcc/${MUSL_URI}-cross/lib/gcc/${MUSL_URI}/11.2.1" || exit 255
# for panic-abort
cp libgcc_eh.a libunwind.a

View File

@@ -21,7 +21,7 @@ on:
version:
description: 'Version for this release'
type: string
default: 'v2.3.1'
default: 'v2.2.3'
required: true
make_latest:
description: 'Mark this release as latest'
@@ -57,7 +57,7 @@ jobs:
repo: EasyTier/EasyTier
path: release_assets_nozip
- name: Download Mobile Artifact
- name: Download GUI Artifact
uses: dawidd6/action-download-artifact@v6
with:
github_token: ${{secrets.GITHUB_TOKEN}}
@@ -78,14 +78,7 @@ jobs:
ls -l -R ./
chmod -R 755 .
for x in `ls`; do
if [ "$x" = "Easytier-Magisk" ]; then
# for Easytier-Magisk, make sure files are in the root of the zip
cd $x;
zip -r ../../zipped_assets/$x-${VERSION}.zip .;
cd ..;
else
zip -r ../zipped_assets/$x-${VERSION}.zip $x;
fi
zip ../zipped_assets/$x-${VERSION}.zip $x/*;
done
- name: Release

View File

@@ -47,40 +47,11 @@ jobs:
- name: Setup system for test
run: |
sudo modprobe br_netfilter
sudo sysctl net.bridge.bridge-nf-call-iptables=0
sudo sysctl net.bridge.bridge-nf-call-ip6tables=0
sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
sudo ip addr add 2001:db8::2/64 dev lo
- uses: actions/setup-node@v4
with:
node-version: 21
- name: Install pnpm
uses: pnpm/action-setup@v3
with:
version: 9
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
pnpm -r install
pnpm -r --filter "./easytier-web/*" build
- name: Cargo cache
uses: actions/cache@v4
with:

975
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,12 +1,6 @@
[workspace]
resolver = "2"
members = [
"easytier",
"easytier-gui/src-tauri",
"easytier-rpc-build",
"easytier-web",
"easytier-contrib/easytier-ffi",
]
members = ["easytier", "easytier-gui/src-tauri", "easytier-rpc-build", "easytier-web"]
default-members = ["easytier", "easytier-web"]
[profile.dev]
@@ -16,5 +10,3 @@ panic = "unwind"
panic = "abort"
lto = true
codegen-units = 1
opt-level = 3
strip = true

View File

@@ -1,13 +1,10 @@
# EasyTier
[![Github release](https://img.shields.io/github/v/tag/EasyTier/EasyTier)](https://github.com/EasyTier/EasyTier/releases)
[![GitHub](https://img.shields.io/github/license/EasyTier/EasyTier)](https://github.com/EasyTier/EasyTier/blob/main/LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/EasyTier/EasyTier)](https://github.com/EasyTier/EasyTier/commits/main)
[![GitHub issues](https://img.shields.io/github/issues/EasyTier/EasyTier)](https://github.com/EasyTier/EasyTier/issues)
[![GitHub Core Actions](https://github.com/EasyTier/EasyTier/actions/workflows/core.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/core.yml)
[![GitHub GUI Actions](https://github.com/EasyTier/EasyTier/actions/workflows/gui.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/gui.yml)
[![GitHub Test Actions](https://github.com/EasyTier/EasyTier/actions/workflows/test.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/test.yml)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/EasyTier/EasyTier)
[简体中文](/README_CN.md) | [English](/README.md)
@@ -64,36 +61,7 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented
wget -O /tmp/easytier.sh "https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh" && bash /tmp/easytier.sh install
```
The script supports the following commands and options:
Commands:
- `install`: Install EasyTier
- `uninstall`: Uninstall EasyTier
- `update`: Update EasyTier to the latest version
- `help`: Show help message
Options:
- `--skip-folder-verify`: Skip folder verification during installation
- `--skip-folder-fix`: Skip automatic folder path fixing
- `--no-gh-proxy`: Disable GitHub proxy
- `--gh-proxy`: Set custom GitHub proxy URL (default: https://ghfast.top/)
Examples:
```sh
# Show help
bash /tmp/easytier.sh help
# Install with options
bash /tmp/easytier.sh install --skip-folder-verify
bash /tmp/easytier.sh install --no-gh-proxy
bash /tmp/easytier.sh install --gh-proxy https://your-proxy.com/
# Update EasyTier
bash /tmp/easytier.sh update
# Uninstall EasyTier
bash /tmp/easytier.sh uninstall
```
You can also uninstall/update Easytier by the command "uninstall" or "update" of this script
6. **Install by Homebrew (For MacOS Only)**

View File

@@ -61,36 +61,7 @@
wget -O /tmp/easytier.sh "https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh" && bash /tmp/easytier.sh install
```
脚本支持以下命令和选项:
命令:
- `install`: 安装 EasyTier
- `uninstall`: 卸载 EasyTier
- `update`: 更新 EasyTier 到最新版本
- `help`: 显示帮助信息
选项:
- `--skip-folder-verify`: 跳过安装过程中的文件夹验证
- `--skip-folder-fix`: 跳过自动修复文件夹路径
- `--no-gh-proxy`: 禁用 GitHub 代理
- `--gh-proxy`: 设置自定义 GitHub 代理 URL (默认值: https://ghfast.top/)
示例:
```sh
# 查看帮助
bash /tmp/easytier.sh help
# 安装(带选项)
bash /tmp/easytier.sh install --skip-folder-verify
bash /tmp/easytier.sh install --no-gh-proxy
bash /tmp/easytier.sh install --gh-proxy https://your-proxy.com/
# 更新 EasyTier
bash /tmp/easytier.sh update
# 卸载 EasyTier
bash /tmp/easytier.sh uninstall
```
使用本脚本安装的 Easytier 可以使用脚本的 uninstall/update 对其卸载/升级
6. **使用 Homebrew 安装 (仅适用于 MacOS)**

View File

@@ -1,16 +0,0 @@
[package]
name = "easytier-ffi"
version = "0.1.0"
edition = "2021"
[lib]
crate-type = ["cdylib"]
[dependencies]
easytier = { path = "../../easytier" }
once_cell = "1.18.0"
dashmap = "6.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1"

View File

@@ -1,159 +0,0 @@
public class EasyTierFFI
{
// 导入 DLL 函数
private const string DllName = "easytier_ffi.dll";
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
private static extern int parse_config([MarshalAs(UnmanagedType.LPStr)] string cfgStr);
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
private static extern int run_network_instance([MarshalAs(UnmanagedType.LPStr)] string cfgStr);
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
private static extern int retain_network_instance(IntPtr instNames, int length);
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
private static extern int collect_network_infos(IntPtr infos, int maxLength);
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
private static extern void get_error_msg(out IntPtr errorMsg);
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
private static extern void free_string(IntPtr str);
// 定义 KeyValuePair 结构体
[StructLayout(LayoutKind.Sequential)]
public struct KeyValuePair
{
public IntPtr Key;
public IntPtr Value;
}
// 解析配置
public static void ParseConfig(string config)
{
if (string.IsNullOrEmpty(config))
{
throw new ArgumentException("Configuration string cannot be null or empty.");
}
int result = parse_config(config);
if (result < 0)
{
throw new Exception(GetErrorMessage());
}
}
// 启动网络实例
public static void RunNetworkInstance(string config)
{
if (string.IsNullOrEmpty(config))
{
throw new ArgumentException("Configuration string cannot be null or empty.");
}
int result = run_network_instance(config);
if (result < 0)
{
throw new Exception(GetErrorMessage());
}
}
// 保留网络实例
public static void RetainNetworkInstances(string[] instanceNames)
{
IntPtr[] namePointers = null;
IntPtr namesPtr = IntPtr.Zero;
try
{
if (instanceNames != null && instanceNames.Length > 0)
{
namePointers = new IntPtr[instanceNames.Length];
for (int i = 0; i < instanceNames.Length; i++)
{
if (string.IsNullOrEmpty(instanceNames[i]))
{
throw new ArgumentException("Instance name cannot be null or empty.");
}
namePointers[i] = Marshal.StringToHGlobalAnsi(instanceNames[i]);
}
namesPtr = Marshal.AllocHGlobal(Marshal.SizeOf<IntPtr>() * namePointers.Length);
Marshal.Copy(namePointers, 0, namesPtr, namePointers.Length);
}
int result = retain_network_instance(namesPtr, instanceNames?.Length ?? 0);
if (result < 0)
{
throw new Exception(GetErrorMessage());
}
}
finally
{
if (namePointers != null)
{
foreach (var ptr in namePointers)
{
if (ptr != IntPtr.Zero)
{
Marshal.FreeHGlobal(ptr);
}
}
}
if (namesPtr != IntPtr.Zero)
{
Marshal.FreeHGlobal(namesPtr);
}
}
}
// 收集网络信息
public static KeyValuePair<string, string>[] CollectNetworkInfos(int maxLength)
{
IntPtr buffer = Marshal.AllocHGlobal(Marshal.SizeOf<KeyValuePair>() * maxLength);
try
{
int count = collect_network_infos(buffer, maxLength);
if (count < 0)
{
throw new Exception(GetErrorMessage());
}
var result = new KeyValuePair<string, string>[count];
for (int i = 0; i < count; i++)
{
var kv = Marshal.PtrToStructure<KeyValuePair>(buffer + i * Marshal.SizeOf<KeyValuePair>());
string key = Marshal.PtrToStringAnsi(kv.Key);
string value = Marshal.PtrToStringAnsi(kv.Value);
// 释放由 FFI 分配的字符串内存
free_string(kv.Key);
free_string(kv.Value);
result[i] = new KeyValuePair<string, string>(key, value);
}
return result;
}
finally
{
Marshal.FreeHGlobal(buffer);
}
}
// 获取错误信息
private static string GetErrorMessage()
{
get_error_msg(out IntPtr errorMsgPtr);
if (errorMsgPtr == IntPtr.Zero)
{
return "Unknown error";
}
string errorMsg = Marshal.PtrToStringAnsi(errorMsgPtr);
free_string(errorMsgPtr); // 释放错误信息字符串
return errorMsg;
}
}

View File

@@ -1,199 +0,0 @@
use std::sync::Mutex;
use dashmap::DashMap;
use easytier::{
common::config::{ConfigLoader as _, TomlConfigLoader},
launcher::NetworkInstance,
};
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> =
once_cell::sync::Lazy::new(DashMap::new);
static ERROR_MSG: once_cell::sync::Lazy<Mutex<Vec<u8>>> =
once_cell::sync::Lazy::new(|| Mutex::new(Vec::new()));
#[repr(C)]
pub struct KeyValuePair {
pub key: *const std::ffi::c_char,
pub value: *const std::ffi::c_char,
}
fn set_error_msg(msg: &str) {
let bytes = msg.as_bytes();
let mut msg_buf = ERROR_MSG.lock().unwrap();
let len = bytes.len();
msg_buf.resize(len, 0);
msg_buf[..len].copy_from_slice(bytes);
}
#[no_mangle]
pub extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
let msg_buf = ERROR_MSG.lock().unwrap();
if msg_buf.is_empty() {
unsafe {
*out = std::ptr::null();
}
return;
}
let cstr = std::ffi::CString::new(&msg_buf[..]).unwrap();
unsafe {
*out = cstr.into_raw();
}
}
#[no_mangle]
pub extern "C" fn free_string(s: *const std::ffi::c_char) {
if s.is_null() {
return;
}
unsafe {
let _ = std::ffi::CString::from_raw(s as *mut std::ffi::c_char);
}
}
#[no_mangle]
pub extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe {
assert!(!cfg_str.is_null());
std::ffi::CStr::from_ptr(cfg_str)
.to_string_lossy()
.into_owned()
};
if let Err(e) = TomlConfigLoader::new_from_str(&cfg_str) {
set_error_msg(&format!("failed to parse config: {:?}", e));
return -1;
}
0
}
#[no_mangle]
pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
let cfg_str = unsafe {
assert!(!cfg_str.is_null());
std::ffi::CStr::from_ptr(cfg_str)
.to_string_lossy()
.into_owned()
};
let cfg = match TomlConfigLoader::new_from_str(&cfg_str) {
Ok(cfg) => cfg,
Err(e) => {
set_error_msg(&format!("failed to parse config: {}", e));
return -1;
}
};
let inst_name = cfg.get_inst_name();
if INSTANCE_MAP.contains_key(&inst_name) {
set_error_msg("instance already exists");
return -1;
}
let mut instance = NetworkInstance::new(cfg);
if let Err(e) = instance.start().map_err(|e| e.to_string()) {
set_error_msg(&format!("failed to start instance: {}", e));
return -1;
}
INSTANCE_MAP.insert(inst_name, instance);
0
}
#[no_mangle]
pub extern "C" fn retain_network_instance(
inst_names: *const *const std::ffi::c_char,
length: usize,
) -> std::ffi::c_int {
if length == 0 {
INSTANCE_MAP.clear();
return 0;
}
let inst_names = unsafe {
assert!(!inst_names.is_null());
std::slice::from_raw_parts(inst_names, length)
.iter()
.map(|&name| {
assert!(!name.is_null());
std::ffi::CStr::from_ptr(name)
.to_string_lossy()
.into_owned()
})
.collect::<Vec<_>>()
};
let _ = INSTANCE_MAP.retain(|k, _| inst_names.contains(k));
0
}
#[no_mangle]
pub extern "C" fn collect_network_infos(
infos: *mut KeyValuePair,
max_length: usize,
) -> std::ffi::c_int {
if max_length == 0 {
return 0;
}
let infos = unsafe {
assert!(!infos.is_null());
std::slice::from_raw_parts_mut(infos, max_length)
};
let mut index = 0;
for instance in INSTANCE_MAP.iter() {
if index >= max_length {
break;
}
let key = instance.key();
let Some(value) = instance.get_running_info() else {
continue;
};
// convert value to json string
let value = match serde_json::to_string(&value) {
Ok(value) => value,
Err(e) => {
set_error_msg(&format!("failed to serialize instance info: {}", e));
return -1;
}
};
infos[index] = KeyValuePair {
key: std::ffi::CString::new(key.clone()).unwrap().into_raw(),
value: std::ffi::CString::new(value).unwrap().into_raw(),
};
index += 1;
}
index as std::ffi::c_int
}
#[cfg(test)]
mod tests {
use super::*;
// Smoke test for `parse_config`.
// NOTE(review): the config below contains the bare line `fdsafdsa`, which
// does not look like valid TOML, yet the test expects success (0) —
// confirm whether TomlConfigLoader tolerates it or whether this should
// assert -1 instead.
#[test]
fn test_parse_config() {
let cfg_str = r#"
inst_name = "test"
network = "test_network"
fdsafdsa
"#;
let cstr = std::ffi::CString::new(cfg_str).unwrap();
assert_eq!(parse_config(cstr.as_ptr()), 0);
}
// Starts a real network instance from a minimal config and expects success;
// the instance stays registered in INSTANCE_MAP afterwards.
#[test]
fn test_run_network_instance() {
let cfg_str = r#"
inst_name = "test"
network = "test_network"
"#;
let cstr = std::ffi::CString::new(cfg_str).unwrap();
assert_eq!(run_network_instance(cstr.as_ptr()), 0);
}
}

View File

@@ -1,33 +0,0 @@
#!/sbin/sh
#################
# Initialization
#################
umask 022

# Minimal ui_print used before util_functions.sh is loaded.
ui_print() { echo "$1"; }

# Abort installation when the installed Magisk is too old (or missing).
require_new_magisk() {
  ui_print "********************************"
  ui_print " Please install Magisk v20.4+! "
  ui_print "********************************"
  exit 1
}

#########################
# Load util_functions.sh
#########################
OUTFD=$2
ZIPFILE=$3

mount /data 2>/dev/null

[ -f /data/adb/magisk/util_functions.sh ] || require_new_magisk
. /data/adb/magisk/util_functions.sh
# Default to 0 when MAGISK_VER_CODE is unset/empty: the unquoted original
# expanded to `[ -lt 20400 ]`, a test syntax error whose non-zero status
# skipped require_new_magisk and let installation continue.
[ "${MAGISK_VER_CODE:-0}" -lt 20400 ] && require_new_magisk

install_module
exit 0

View File

@@ -1,6 +0,0 @@
# easytier_magisk版模块
magisk安装后重启
目录位置:/data/adb/modules/easytier_magisk
配置文件位置：/data/adb/modules/easytier_magisk/config/config.toml
修改配置文件 config.toml 后，在 Magisk App 中重新开关模块即可生效

View File

@@ -1,14 +0,0 @@
#!/data/adb/magisk/busybox sh
MODDIR=${0%/*}

# Look up the PID(s) of any running easytier-core process.
PID=$(pgrep easytier-core)

if [ -n "$PID" ]; then
  # Found it: terminate the process and report which PID was killed.
  kill $PID
  echo "已结束 easytier-core 进程 (PID: $PID)"
else
  echo "easytier-core 进程未找到"
fi

View File

@@ -1,25 +0,0 @@
#!/bin/sh
# Read the crate version from the workspace Cargo.toml. (The old script
# first parsed module.prop into $version and immediately overwrote it —
# dead code — and then prefixed 'v' BEFORE the emptiness check, so the
# check could never fire.)
core_version=$(grep '^version =' ../../easytier/Cargo.toml | cut -d '"' -f 2)
if [ -z "$core_version" ]; then
    echo "Error: 版本号不存在."
    exit 1
fi
version="v${core_version}"

filename="easytier_magisk_${version}.zip"
echo "$version"

if [ -f "./easytier-core" ] && [ -f "./easytier-cli" ] && [ -f "./easytier-web" ]; then
    # Binaries already present: just package the module.
    zip -r -o -X "$filename" ./ -x '.git/*' -x '.github/*' -x 'folder/*' -x 'build.sh' -x 'magisk_update.json'
else
    # Download the matching release binaries first, then package.
    wget -O "easytier_last.zip" https://github.com/EasyTier/EasyTier/releases/download/"$version"/easytier-linux-aarch64-"$version".zip
    unzip -o easytier_last.zip -d ./
    mv ./easytier-linux-aarch64/* ./
    rm -rf ./easytier_last.zip
    rm -rf ./easytier-linux-aarch64
    zip -r -o -X "$filename" ./ -x '.git/*' -x '.github/*' -x 'folder/*' -x 'build.sh' -x 'magisk_update.json'
fi

View File

@@ -1,37 +0,0 @@
# EasyTier node configuration shipped with the Magisk module.
instance_name = "default"
dhcp = false
#ipv4 = "<this node's virtual IP>"

# Addresses this node accepts peer connections on.
listeners = [
"tcp://0.0.0.0:11010",
"udp://0.0.0.0:11010",
"wg://0.0.0.0:11011",
"ws://0.0.0.0:11011/",
"wss://0.0.0.0:11012/",
]

mapped_listeners = []

exit_nodes = []

rpc_portal = "0.0.0.0:15888"

[network_identity]
network_name = "default"
network_secret = ""

[[peer]]
#uri = "<protocol>://<relay server ip>:<port>"

[flags]
default_protocol = "tcp"
dev_name = ""
enable_encryption = true
enable_ipv6 = true
mtu = 1380
latency_first = false
enable_exit_node = false
no_tun = false
use_smoltcp = false
foreign_network_whitelist = "*"
disable_p2p = false
relay_all_peer_rpc = false
disable_udp_hole_punching = false

View File

@@ -1,7 +0,0 @@
ui_print '安装完成'
# Shell has no "+" concatenation operator: the old `'文本' + $ARCH` form
# passed the text, a literal "+", and the value as separate arguments, so
# the variable never appeared in the printed line. Interpolate instead.
ui_print "当前架构为 $ARCH"
ui_print "当前系统版本为 $API"
ui_print '安装目录为: /data/adb/modules/easytier_magisk'
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.toml'
ui_print '修改后配置文件后在magisk app点击操作按钮即可生效'
ui_print '记得重启'

View File

@@ -1,48 +0,0 @@
#!/system/bin/sh
MODDIR=${0%/*}
CONFIG_FILE="${MODDIR}/config/config.toml"
LOG_FILE="${MODDIR}/log.log"
MODULE_PROP="${MODDIR}/module.prop"
EASYTIER="${MODDIR}/easytier-core"

# Rewrite the description= line in module.prop so the module status is
# visible in the Magisk app.
update_module_description() {
    local status_message=$1
    sed -i "/^description=/c\description=[状态]${status_message}" "${MODULE_PROP}"
}

# Make sure /dev/net/tun exists before starting easytier-core.
if [ ! -e /dev/net/tun ]; then
    if [ ! -d /dev/net ]; then
        mkdir -p /dev/net
    fi
    ln -s /dev/tun /dev/net/tun
fi

# Supervision loop: start or stop easytier-core according to the module's
# Magisk "disable" flag file.
while true; do
    # Check the flag file directly. The old `ls $MODDIR | grep -q disable`
    # also matched any other filename containing "disable", and service.sh
    # already tests the literal flag file.
    if [ -f "${MODDIR}/disable" ]; then
        update_module_description "关闭中"
        if pgrep -f 'easytier-core' >/dev/null; then
            echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在，正在关闭 ..."
            pkill easytier-core
        fi
    else
        if ! pgrep -f 'easytier-core' >/dev/null; then
            if [ ! -f "$CONFIG_FILE" ]; then
                update_module_description "config.toml不存在"
                sleep 3s
                continue
            fi
            TZ=Asia/Shanghai "${EASYTIER}" -c "${CONFIG_FILE}" > "${LOG_FILE}" &
            sleep 5s # give easytier-core time to come up
            update_module_description "已开启(不一定运行成功)"
            ip rule add from all lookup main
        else
            echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在"
        fi
    fi
    sleep 3s # pause before the next supervision pass
done

View File

@@ -1,6 +0,0 @@
{
"version": "v1.0",
"versionCode": 1,
"zipUrl": "",
"changelog": ""
}

View File

@@ -1,7 +0,0 @@
id=easytier_magisk
name=EasyTier_Magisk
version=v2.3.1
versionCode=1
author=EasyTier
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
updateJson=https://raw.githubusercontent.com/EasyTier/EasyTier/refs/heads/main/easytier-contrib/easytier-magisk/magisk_update.json

View File

@@ -1,27 +0,0 @@
#!/data/adb/magisk/busybox sh
MODDIR=${0%/*}
# MODDIR="$(dirname $(readlink -f "$0"))"

chmod 755 "${MODDIR}"/*

# Wait until Android has finished booting.
while [ "$(getprop sys.boot_completed)" != "1" ]; do
    sleep 5s
done

# Keep the device from suspending while the tunnel runs.
echo "PowerManagerService.noSuspend" > /sys/power/wake_lock

# Mark the module as stopped in the Magisk app. The previous expression
# ('s/$(description=)$[^"]*/\1.../') mixed shell $() syntax into the sed
# pattern and referenced a nonexistent capture group, so it never matched;
# rewrite the whole description= line as easytier_core.sh does.
sed -i 's/^description=.*/description=[状态]关闭中/' "$MODDIR/module.prop"

sleep 3s

# Launch the supervision loop in the background.
"${MODDIR}/easytier_core.sh" &

# Block until the user disables the module, then stop the daemon.
while [ ! -f "${MODDIR}/disable" ]; do
    sleep 2
done
pkill easytier-core

View File

@@ -1,2 +0,0 @@
nameserver 114.114.114.114
nameserver 223.5.5.5

View File

@@ -1,3 +0,0 @@
MODDIR=${0%/*}
# Stop the daemon before removing the module files.
pkill easytier-core
# Quote MODDIR so a path containing spaces cannot expand into a
# destructive glob outside the module directory.
rm -rf "$MODDIR"/*

View File

@@ -18,11 +18,7 @@ cd ../tauri-plugin-vpnservice
pnpm install
pnpm build
cd ../easytier-web/frontend-lib
pnpm install
pnpm build
cd ../../easytier-gui
cd ../easytier-gui
pnpm install
pnpm tauri build
```

View File

@@ -113,4 +113,3 @@ event:
VpnPortalClientDisconnected: VPN门户客户端已断开连接
DhcpIpv4Changed: DHCP IPv4地址更改
DhcpIpv4Conflicted: DHCP IPv4地址冲突
PortForwardAdded: 端口转发添加

View File

@@ -112,4 +112,3 @@ event:
VpnPortalClientDisconnected: VpnPortalClientDisconnected
DhcpIpv4Changed: DhcpIpv4Changed
DhcpIpv4Conflicted: DhcpIpv4Conflicted
PortForwardAdded: PortForwardAdded

View File

@@ -1,7 +1,7 @@
{
"name": "easytier-gui",
"type": "module",
"version": "2.3.1",
"version": "2.2.3",
"private": true,
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
"scripts": {
@@ -13,7 +13,7 @@
"lint:fix": "eslint . --ignore-pattern src-tauri --fix"
},
"dependencies": {
"@primevue/themes": "4.3.3",
"@primevue/themes": "^4.2.1",
"@tauri-apps/plugin-autostart": "2.0.0",
"@tauri-apps/plugin-clipboard-manager": "2.0.0",
"@tauri-apps/plugin-os": "2.0.0",
@@ -24,7 +24,7 @@
"easytier-frontend-lib": "workspace:*",
"ip-num": "1.5.1",
"pinia": "^2.2.4",
"primevue": "4.3.3",
"primevue": "^4.2.1",
"tauri-plugin-vpnservice-api": "workspace:*",
"vue": "^3.5.12",
"vue-router": "^4.4.5"
@@ -32,7 +32,7 @@
"devDependencies": {
"@antfu/eslint-config": "^3.7.3",
"@intlify/unplugin-vue-i18n": "^5.2.0",
"@primevue/auto-import-resolver": "4.3.3",
"@primevue/auto-import-resolver": "^4.1.0",
"@tauri-apps/api": "2.1.0",
"@tauri-apps/cli": "2.1.0",
"@types/default-gateway": "^7.2.2",

View File

@@ -1,6 +1,6 @@
[package]
name = "easytier-gui"
version = "2.3.1"
version = "2.2.3"
description = "EasyTier GUI"
authors = ["you"]
edition = "2021"
@@ -14,13 +14,6 @@ crate-type = ["staticlib", "cdylib", "rlib"]
[build-dependencies]
tauri-build = { version = "2.0.0-rc", features = [] }
# enable thunk-rs when compiling for x86_64 or i686 windows
[target.x86_64-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[target.i686-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[dependencies]
# wry 0.47 may crash on android, see https://github.com/EasyTier/EasyTier/issues/527
tauri = { version = "=2.0.6", features = [
@@ -60,4 +53,4 @@ tauri-plugin-autostart = "2.0"
custom-protocol = ["tauri/custom-protocol"]
[target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies]
tauri-plugin-single-instance = "2.2.3"
tauri-plugin-single-instance = "2.0.0-rc.0"

View File

@@ -1,12 +1,3 @@
fn main() {
// enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")]
if !std::env::var("TARGET")
.unwrap_or_default()
.contains("aarch64")
{
thunk::thunk();
}
tauri_build::build();
}

View File

@@ -108,12 +108,7 @@ fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> {
fn toggle_window_visibility<R: tauri::Runtime>(app: &tauri::AppHandle<R>) {
if let Some(window) = app.get_webview_window("main") {
if window.is_visible().unwrap_or_default() {
if window.is_minimized().unwrap_or_default() {
let _ = window.unminimize();
let _ = window.set_focus();
} else {
let _ = window.hide();
}
let _ = window.hide();
} else {
let _ = window.show();
let _ = window.set_focus();

View File

@@ -17,7 +17,7 @@
"createUpdaterArtifacts": false
},
"productName": "easytier-gui",
"version": "2.3.1",
"version": "2.2.3",
"identifier": "com.kkrainbow.easytier",
"plugins": {},
"app": {

View File

@@ -132,14 +132,6 @@ async function onNetworkInstanceChange() {
return
}
// if use no tun mode, stop the vpn service
const no_tun = networkStore.isNoTunEnabled(insts[0])
if (no_tun) {
console.error('no tun mode, stop vpn service')
await doStopVpn()
return
}
let network_length = curNetworkInfo?.my_node_info?.virtual_ipv4.network_length
if (!network_length) {
network_length = 24

View File

@@ -128,13 +128,6 @@ export const useNetworkStore = defineStore('networkStore', {
}
this.saveAutoStartInstIdsToLocalStorage()
},
isNoTunEnabled(instanceId: string): boolean {
const cfg = this.networkList.find((cfg) => cfg.instance_id === instanceId)
if (!cfg)
return false
return cfg.no_tun ?? false
},
},
})

View File

@@ -45,11 +45,3 @@
border-radius: 4px;
background-color: #0000005d;
}
.p-password {
width: 100%;
}
.p-password>input {
width: 100%;
}

View File

@@ -1,6 +1,6 @@
[package]
name = "easytier-web"
version = "2.3.1"
version = "2.2.3"
edition = "2021"
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
@@ -18,7 +18,6 @@ axum = { version = "0.7", features = ["macros"] }
axum-login = { version = "0.16" }
password-auth = { version = "1.0.0" }
axum-messages = "0.7.0"
axum-embed = { version = "0.1.0", optional = true }
tower-sessions-sqlx-store = { version = "0.14.1", features = ["sqlite"] }
tower-sessions = { version = "0.13.0", default-features = false, features = [
"signed",
@@ -60,14 +59,3 @@ uuid = { version = "1.5.0", features = [
] }
chrono = { version = "0.4.37", features = ["serde"] }
[features]
default = []
embed = ["dep:axum-embed"]
# enable thunk-rs when compiling for x86_64 or i686 windows
[target.x86_64-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[target.i686-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }

View File

@@ -1,7 +0,0 @@
fn main() {
// enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")]
if !std::env::var("TARGET").unwrap_or_default().contains("aarch64"){
thunk::thunk();
}
}

View File

@@ -18,14 +18,14 @@
"preview": "vite preview"
},
"dependencies": {
"@primevue/themes": "4.3.3",
"@primevue/themes": "^4.2.1",
"@vueuse/core": "^11.1.0",
"aura": "link:@primevue\\themes\\aura",
"axios": "^1.7.7",
"floating-vue": "^5.2",
"ip-num": "1.5.1",
"primeicons": "^7.0.0",
"primevue": "4.3.3",
"primevue": "^4.2.1",
"tailwindcss-primeui": "^0.3.4",
"ts-md5": "^1.3.1",
"uuid": "^11.0.2",

View File

@@ -1,7 +1,7 @@
<script setup lang="ts">
import InputGroup from 'primevue/inputgroup'
import InputGroupAddon from 'primevue/inputgroupaddon'
import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password } from 'primevue'
import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button } from 'primevue'
import { DEFAULT_NETWORK_CONFIG, NetworkConfig, NetworkingMethod } from '../types/network'
import { defineProps, defineEmits, ref, } from 'vue'
import { useI18n } from 'vue-i18n'
@@ -155,9 +155,6 @@ const bool_flags: BoolFlag[] = [
{ field: 'multi_thread', help: 'multi_thread_help' },
{ field: 'proxy_forward_by_system', help: 'proxy_forward_by_system_help' },
{ field: 'disable_encryption', help: 'disable_encryption_help' },
{ field: 'disable_udp_hole_punching', help: 'disable_udp_hole_punching_help' },
{ field: 'enable_magic_dns', help: 'enable_magic_dns_help' },
{ field: 'enable_private_mode', help: 'enable_private_mode_help' },
]
</script>
@@ -199,8 +196,8 @@ const bool_flags: BoolFlag[] = [
</div>
<div class="flex flex-col gap-2 basis-5/12 grow">
<label for="network_secret">{{ t('network_secret') }}</label>
<Password id="network_secret" v-model="curNetwork.network_secret"
aria-describedby="network_secret-help" toggleMask :feedback="false"/>
<InputText id="network_secret" v-model="curNetwork.network_secret"
aria-describedby="network_secret-help" />
</div>
</div>
@@ -312,18 +309,6 @@ const bool_flags: BoolFlag[] = [
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex">
<label for="mtu">{{ t('mtu') }}</label>
<span class="pi pi-question-circle ml-2 self-center"
v-tooltip="t('mtu_help')"></span>
</div>
<InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help"
:format="false" :placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid/>
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-col gap-2 basis-5/12 grow">
<div class="flex">
@@ -390,18 +375,6 @@ const bool_flags: BoolFlag[] = [
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap w-full">
<div class="flex flex-col gap-2 grow p-fluid">
<div class="flex">
<label for="mapped_listeners">{{ t('mapped_listeners') }}</label>
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mapped_listeners_help')"></span>
</div>
<AutoComplete id="mapped_listeners" v-model="curNetwork.mapped_listeners"
:placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full"
multiple fluid :suggestions="peerSuggestions" @complete="searchPeerSuggestions" />
</div>
</div>
</div>
</Panel>

View File

@@ -5,7 +5,7 @@ import { NetworkInstance, type NodeInfo, type PeerRoutePair } from '../types/net
import { useI18n } from 'vue-i18n';
import { computed, onMounted, onUnmounted, ref } from 'vue';
import { ipv4InetToString, ipv4ToString, ipv6ToString } from '../modules/utils';
import { DataTable, Column, Tag, Chip, Button, Dialog, ScrollPanel, Timeline, Divider, Card, } from 'primevue';
import { DataTable, Column, Tag, Chip, Button, Dialog, ScrollPanel, Timeline, Divider, Panel, } from 'primevue';
const props = defineProps<{
curNetworkInst: NetworkInstance | null,
@@ -106,10 +106,6 @@ function ipFormat(info: PeerRoutePair) {
return ip ? `${IPv4.fromNumber(ip.address.addr)}/${ip.network_length}` : ''
}
function tunnelProto(info: PeerRoutePair) {
return [...new Set(info.peer?.conns.map(c => c.tunnel?.tunnel_type))].join(',')
}
const myNodeInfo = computed(() => {
if (!props.curNetworkInst)
return {} as NodeInfo
@@ -307,9 +303,9 @@ function showEventLogs() {
<template>
<div class="frontend-lib">
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" class="w-full h-auto max-h-full"
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" class="w-2/3 h-auto max-h-full"
:baseZIndex="2000">
<ScrollPanel v-if="dialogHeader === 'vpn_portal_config'">
<ScrollPanel v-if="dialogHeader === 'vpn_portal_config'" class="w-2/3">
<pre>{{ dialogContent }}</pre>
</ScrollPanel>
<Timeline v-else :value="dialogContent">
@@ -323,108 +319,101 @@ function showEventLogs() {
</Timeline>
</Dialog>
<Card v-if="curNetworkInst?.error_msg">
<template #title>
<Panel v-if="curNetworkInst?.error_msg">
<template #header>
Run Network Error
</template>
<template #content>
<div class="flex flex-col gap-y-5">
<div class="text-red-500">
{{ curNetworkInst.error_msg }}
</div>
<div class="flex flex-col gap-y-5">
<div class="text-red-500">
{{ curNetworkInst.error_msg }}
</div>
</template>
</Card>
</div>
</Panel>
<template v-else>
<Card>
<template #title>
<Panel>
<template #header>
{{ t('my_node_info') }}
</template>
<template #content>
<div class="flex w-full flex-col gap-y-5">
<div class="m-0 flex flex-row justify-center gap-x-5">
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid green">
<div class="font-bold">
{{ t('peer_count') }}
</div>
<div class="text-5xl mt-1">
{{ peerCount }}
</div>
<div class="flex w-full flex-col gap-y-5">
<div class="m-0 flex flex-row justify-center gap-x-5">
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid green">
<div class="font-bold">
{{ t('peer_count') }}
</div>
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid purple">
<div class="font-bold">
{{ t('upload') }}
</div>
<div class="text-xl mt-2">
{{ txRate }}/s
</div>
</div>
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid fuchsia">
<div class="font-bold">
{{ t('download') }}
</div>
<div class="text-xl mt-2">
{{ rxRate }}/s
</div>
<div class="text-5xl mt-1">
{{ peerCount }}
</div>
</div>
<div class="flex flex-row items-center flex-wrap w-full max-h-40 overflow-scroll">
<Chip v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon"
class="mr-2 mt-2 text-sm" />
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid purple">
<div class="font-bold">
{{ t('upload') }}
</div>
<div class="text-xl mt-2">
{{ txRate }}/s
</div>
</div>
<div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm">
<Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" />
<Button severity="info" :label="t('show_event_log')" @click="showEventLogs" />
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid fuchsia">
<div class="font-bold">
{{ t('download') }}
</div>
<div class="text-xl mt-2">
{{ rxRate }}/s
</div>
</div>
</div>
</template>
</Card>
<div class="flex flex-row items-center flex-wrap w-full max-h-40 overflow-scroll">
<Chip v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon"
class="mr-2 mt-2 text-sm" />
</div>
<div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm">
<Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" />
<Button severity="info" :label="t('show_event_log')" @click="showEventLogs" />
</div>
</div>
</Panel>
<Divider />
<Card>
<template #title>
<Panel>
<template #header>
{{ t('peer_info') }}
</template>
<template #content>
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full">
<Column :field="ipFormat" :header="t('virtual_ipv4')" />
<Column :header="t('hostname')">
<template #body="slotProps">
<div v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server"
v-tooltip="slotProps.data.route.hostname">
{{
slotProps.data.route.hostname }}
</div>
<div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1">
<Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info">
{{ t('status.server') }}
</Tag>
<Tag v-if="slotProps.data.route.feature_flag.avoid_relay_data" severity="warn" value="Warn">
{{ t('status.relay') }}
</Tag>
</div>
</template>
</Column>
<Column :field="routeCost" :header="t('route_cost')" />
<Column :field="tunnelProto" :header="t('tunnel_proto')" />
<Column :field="latencyMs" :header="t('latency')" />
<Column :field="txBytes" :header="t('upload_bytes')" />
<Column :field="rxBytes" :header="t('download_bytes')" />
<Column :field="lossRate" :header="t('loss_rate')" />
<Column :header="t('status.version')">
<template #body="slotProps">
<span>{{ version(slotProps.data) }}</span>
</template>
</Column>
</DataTable>
</template>
</Card>
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full">
<Column :field="ipFormat" :header="t('virtual_ipv4')" />
<Column :header="t('hostname')">
<template #body="slotProps">
<div v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server"
v-tooltip="slotProps.data.route.hostname">
{{
slotProps.data.route.hostname }}
</div>
<div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1">
<Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info">
{{ t('status.server') }}
</Tag>
<Tag v-if="slotProps.data.route.feature_flag.avoid_relay_data" severity="warn" value="Warn">
{{ t('status.relay') }}
</Tag>
</div>
</template>
</Column>
<Column :field="routeCost" :header="t('route_cost')" />
<Column :field="latencyMs" :header="t('latency')" />
<Column :field="txBytes" :header="t('upload_bytes')" />
<Column :field="rxBytes" :header="t('download_bytes')" />
<Column :field="lossRate" :header="t('loss_rate')" />
<Column :header="t('status.version')">
<template #body="slotProps">
<span>{{ version(slotProps.data) }}</span>
</template>
</Column>
</DataTable>
</Panel>
</template>
</div>
</template>

View File

@@ -64,7 +64,6 @@ event_log: 事件日志
peer_info: 节点信息
hostname: 主机名
route_cost: 路由
tunnel_proto: 协议
latency: 延迟
upload_bytes: 上传
download_bytes: 下载
@@ -110,17 +109,6 @@ proxy_forward_by_system_help: 通过系统内核转发子网代理数据包,
disable_encryption: 禁用加密
disable_encryption_help: 禁用对等节点通信的加密默认为false必须与对等节点相同
disable_udp_hole_punching: 禁用UDP打洞
disable_udp_hole_punching_help: 禁用UDP打洞功能
enable_magic_dns: 启用魔法DNS
enable_magic_dns_help: |
启用魔法DNS允许通过EasyTier的DNS服务器访问其他节点的虚拟IPv4地址 如 node1.et.net。
enable_private_mode: 启用私有模式
enable_private_mode_help: |
启用私有模式,则不允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转。
relay_network_whitelist: 网络白名单
relay_network_whitelist_help: |
仅转发白名单网络的流量,支持通配符字符串。多个网络名称间可以使用英文空格间隔。
@@ -137,16 +125,6 @@ socks5_help: |
exit_nodes: 出口节点列表
exit_nodes_help: 转发所有流量的出口节点虚拟IPv4地址优先级由列表顺序决定
mtu: MTU
mtu_help: |
TUN设备的MTU默认为非加密时为1380加密时为1360。范围400-1380
mtu_placeholder: 留空为默认值1380
mapped_listeners: 监听映射
mapped_listeners_help: |
手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。
例如tcp://123.123.123.123:11223可以指定多个。
status:
version: 内核版本
local: 本机
@@ -191,4 +169,4 @@ event:
VpnPortalClientDisconnected: VPN门户客户端已断开连接
DhcpIpv4Changed: DHCP IPv4地址更改
DhcpIpv4Conflicted: DHCP IPv4地址冲突
PortForwardAdded: 端口转发添加

View File

@@ -62,7 +62,6 @@ show_event_log: Show Event Log
event_log: Event Log
peer_info: Peer Info
route_cost: Route Cost
tunnel_proto: Protocol
hostname: Hostname
latency: Latency
upload_bytes: Upload
@@ -109,17 +108,6 @@ proxy_forward_by_system_help: Forward packet to proxy networks via system kernel
disable_encryption: Disable Encryption
disable_encryption_help: Disable encryption for peers communication, default is false, must be same with peers
disable_udp_hole_punching: Disable UDP Hole Punching
disable_udp_hole_punching_help: Disable udp hole punching
enable_magic_dns: Enable Magic DNS
enable_magic_dns_help: |
Enable magic dns, all nodes in the network can access each other by domain name, e.g.: node1.et.net.
enable_private_mode: Enable Private Mode
enable_private_mode_help: |
Enable private mode, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node.
relay_network_whitelist: Network Whitelist
relay_network_whitelist_help: |
Only forward traffic from the whitelist networks, supporting wildcard strings, multiple network names can be separated by spaces.
@@ -137,16 +125,6 @@ socks5_help: |
exit_nodes: Exit Nodes
exit_nodes_help: Exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list
mtu: MTU
mtu_help: |
MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380
mtu_placeholder: Leave blank as default value 1380
mapped_listeners: Map Listeners
mapped_listeners_help: |
Manually specify the public address of the listener, other nodes can use this address to connect to this node.
e.g.: tcp://123.123.123.123:11223, can specify multiple.
status:
version: Version
local: Local
@@ -191,4 +169,3 @@ event:
VpnPortalClientDisconnected: VpnPortalClientDisconnected
DhcpIpv4Changed: DhcpIpv4Changed
DhcpIpv4Conflicted: DhcpIpv4Conflicted
PortForwardAdded: PortForwardAdded

View File

@@ -47,7 +47,6 @@ export interface NetworkConfig {
multi_thread?: boolean
proxy_forward_by_system?: boolean
disable_encryption?: boolean
disable_udp_hole_punching?: boolean
enable_relay_network_whitelist?: boolean
relay_network_whitelist: string[]
@@ -59,12 +58,6 @@ export interface NetworkConfig {
enable_socks5?: boolean
socks5_port: number
mtu: number | null
mapped_listeners: string[]
enable_magic_dns?: boolean
enable_private_mode?: boolean
}
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
@@ -111,7 +104,6 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
multi_thread: true,
proxy_forward_by_system: false,
disable_encryption: false,
disable_udp_hole_punching: false,
enable_relay_network_whitelist: false,
relay_network_whitelist: [],
enable_manual_routes: false,
@@ -119,10 +111,6 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
exit_nodes: [],
enable_socks5: false,
socks5_port: 1080,
mtu: null,
mapped_listeners: [],
enable_magic_dns: false,
enable_private_mode: false,
}
}
@@ -269,6 +257,4 @@ export enum EventType {
DhcpIpv4Changed = 'DhcpIpv4Changed', // ipv4 | null, ipv4 | null
DhcpIpv4Conflicted = 'DhcpIpv4Conflicted', // ipv4 | null
PortForwardAdded = 'PortForwardAdded', // PortForwardConfigPb
}

View File

@@ -5,7 +5,6 @@
<link rel="icon" type="image/png" href="/easytier.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>EasyTier Dashboard</title>
<script src="/api_meta.js"></script>
</head>
<body>
<div id="app"></div>

View File

@@ -9,11 +9,11 @@
"preview": "vite preview"
},
"dependencies": {
"@primevue/themes": "4.3.3",
"@primevue/themes": "^4.2.1",
"aura": "link:@primevue/themes/aura",
"axios": "^1.7.7",
"easytier-frontend-lib": "workspace:*",
"primevue": "4.3.3",
"primevue": "^4.2.1",
"tailwindcss-primeui": "^0.3.4",
"vue": "^3.5.12",
"vue-router": "4"

View File

@@ -1,32 +1,16 @@
<script setup lang="ts">
import { NetworkTypes } from 'easytier-frontend-lib';
import {computed, ref} from 'vue';
import { ref } from 'vue';
import { Api } from 'easytier-frontend-lib'
import {AutoComplete, Divider} from "primevue";
import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host"
const api = computed<Api.ApiClient>(() => new Api.ApiClient(apiHost.value));
const apiHost = ref<string>(getInitialApiHost())
const apiHostSuggestions = ref<Array<string>>([])
const apiHostSearch = async (event: { query: string }) => {
apiHostSuggestions.value = [];
let hosts = cleanAndLoadApiHosts();
if (event.query) {
apiHostSuggestions.value.push(event.query);
}
hosts.forEach((host) => {
apiHostSuggestions.value.push(host.value);
});
}
const defaultApiHost = 'https://config-server.easytier.cn'
const api = new Api.ApiClient(defaultApiHost);
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
const toml_config = ref<string>("Press 'Run Network' to generate TOML configuration");
const generateConfig = (config: NetworkTypes.NetworkConfig) => {
saveApiHost(apiHost.value)
api.value?.generate_config({
api.generate_config({
config: config
}).then((res) => {
if (res.error) {
@@ -45,14 +29,6 @@ const generateConfig = (config: NetworkTypes.NetworkConfig) => {
<div class="flex items-center justify-center m-5">
<div class="sm:block md:flex w-full">
<div class="sm:w-full md:w-1/2 p-4">
<div class="flex flex-col">
<div class="w-11/12 self-center ">
<label>ApiHost</label>
<AutoComplete id="api-host" v-model="apiHost" dropdown :suggestions="apiHostSuggestions"
@complete="apiHostSearch" class="w-full" />
<Divider />
</div>
</div>
<Config :cur-network="newNetworkConfig" @run-network="generateConfig" />
</div>
<div class="sm:w-full md:w-1/2 p-4 bg-gray-100">

View File

@@ -160,7 +160,6 @@ const createNewNetwork = async () => {
const newNetwork = () => {
newNetworkConfig.value = NetworkTypes.DEFAULT_NETWORK_CONFIG();
newNetworkConfig.value.hostname = deviceInfo.value?.hostname;
isEditing.value = false;
showCreateNetworkDialog.value = true;
}

View File

@@ -4,7 +4,6 @@ import { Card, InputText, Password, Button, AutoComplete } from 'primevue';
import { useRouter } from 'vue-router';
import { useToast } from 'primevue/usetoast';
import { Api } from 'easytier-frontend-lib';
import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host"
defineProps<{
isRegistering: boolean;
@@ -21,6 +20,56 @@ const registerPassword = ref('');
const captcha = ref('');
const captchaSrc = computed(() => api.value.captcha_url());
interface ApiHost {
value: string;
usedAt: number;
}
const isValidHttpUrl = (s: string): boolean => {
let url;
try {
url = new URL(s);
} catch (_) {
return false;
}
return url.protocol === "http:" || url.protocol === "https:";
}
// Load the saved API hosts from localStorage, most recently used first,
// trimmed to at most 10 entries (the trimmed list is written back).
// Corrupt or non-array data is discarded instead of throwing, so a bad
// localStorage value cannot break the login page.
const cleanAndLoadApiHosts = (): Array<ApiHost> => {
    const maxHosts = 10;
    const apiHosts = localStorage.getItem('apiHosts');
    if (!apiHosts) {
        return [];
    }
    let hosts: Array<ApiHost>;
    try {
        hosts = JSON.parse(apiHosts);
    } catch (_) {
        // Invalid JSON (e.g. manual edits): reset the stored value.
        localStorage.removeItem('apiHosts');
        return [];
    }
    if (!Array.isArray(hosts)) {
        localStorage.removeItem('apiHosts');
        return [];
    }
    // sort by usedAt, newest first
    hosts.sort((a, b) => b.usedAt - a.usedAt);
    // only keep the first 10
    if (hosts.length > maxHosts) {
        hosts.splice(maxHosts);
    }
    localStorage.setItem('apiHosts', JSON.stringify(hosts));
    return hosts;
};
const saveApiHost = (host: string) => {
console.log('Save API Host:', host);
if (!isValidHttpUrl(host)) {
console.error('Invalid API Host:', host);
return;
}
let hosts = cleanAndLoadApiHosts();
const newHost: ApiHost = { value: host, usedAt: Date.now() };
hosts = hosts.filter((h) => h.value !== host);
hosts.push(newHost);
localStorage.setItem('apiHosts', JSON.stringify(hosts));
};
const onSubmit = async () => {
// Add your login logic here
@@ -51,6 +100,16 @@ const onRegister = async () => {
}
};
const getInitialApiHost = (): string => {
const hosts = cleanAndLoadApiHosts();
if (hosts.length > 0) {
return hosts[0].value;
} else {
return defaultApiHost;
}
};
const defaultApiHost = 'https://config-server.easytier.cn'
const apiHost = ref<string>(getInitialApiHost())
const apiHostSuggestions = ref<Array<string>>([])
const apiHostSearch = async (event: { query: string }) => {
@@ -65,7 +124,10 @@ const apiHostSearch = async (event: { query: string }) => {
}
onMounted(() => {
let hosts = cleanAndLoadApiHosts();
if (hosts.length === 0) {
saveApiHost(defaultApiHost);
}
});
</script>

View File

@@ -1,71 +0,0 @@
interface ApiHost {
value: string;
usedAt: number;
}
let apiMeta: {
api_host: string;
} | undefined = (window as any).apiMeta;
// remove trailing slashes from the URL
const cleanUrl = (url: string) => url.replace(/\/+$/, '');
const defaultApiHost = cleanUrl(apiMeta?.api_host ?? `${location.origin}${location.pathname}`);
const isValidHttpUrl = (s: string): boolean => {
let url;
try {
url = new URL(s);
} catch (_) {
return false;
}
return url.protocol === "http:" || url.protocol === "https:";
};
const cleanAndLoadApiHosts = (): Array<ApiHost> => {
const maxHosts = 10;
const apiHosts = localStorage.getItem('apiHosts');
if (apiHosts) {
const hosts: Array<ApiHost> = JSON.parse(apiHosts);
// sort by usedAt
hosts.sort((a, b) => b.usedAt - a.usedAt);
// only keep the first 10
if (hosts.length > maxHosts) {
hosts.splice(maxHosts);
}
localStorage.setItem('apiHosts', JSON.stringify(hosts));
return hosts;
} else {
return [];
}
};
const saveApiHost = (host: string) => {
console.log('Save API Host:', host);
if (!isValidHttpUrl(host)) {
console.error('Invalid API Host:', host);
return;
}
let hosts = cleanAndLoadApiHosts();
const newHost: ApiHost = { value: host, usedAt: Date.now() };
hosts = hosts.filter((h) => h.value !== host);
hosts.push(newHost);
localStorage.setItem('apiHosts', JSON.stringify(hosts));
};
const getInitialApiHost = (): string => {
const hosts = cleanAndLoadApiHosts();
if (hosts.length > 0) {
return hosts[0].value;
} else {
saveApiHost(defaultApiHost)
return defaultApiHost;
}
};
export { getInitialApiHost, cleanAndLoadApiHosts, saveApiHost }

View File

@@ -1,22 +1,9 @@
import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue'
// import { viteSingleFile } from "vite-plugin-singlefile"
const WEB_BASE_URL = process.env.WEB_BASE_URL || '';
const API_BASE_URL = process.env.API_BASE_URL || 'http://localhost:11211';
import { viteSingleFile } from "vite-plugin-singlefile"
// https://vite.dev/config/
export default defineConfig({
base: WEB_BASE_URL,
plugins: [vue(),/* viteSingleFile() */],
server: {
proxy: {
"/api": {
target: API_BASE_URL,
},
"/api_meta.js": {
target: API_BASE_URL,
},
}
}
base: '',
plugins: [vue(), viteSingleFile()],
})

View File

@@ -22,12 +22,3 @@ cli:
api_server_port:
en: "The port to listen for the restful server, acting as ApiHost and used by the web frontend"
zh-CN: "restful 服务器的监听端口,作为 ApiHost 并被 web 前端使用"
web_server_port:
en: "The port to listen for the web dashboard server, default is same as the api server port"
zh-CN: "web dashboard 服务器的监听端口, 默认为与 api 服务器端口相同"
no_web:
en: "Do not run the web dashboard server"
zh-CN: "不运行 web dashboard 服务器"
api_host:
en: "The URL of the API server, used by the web frontend to connect to"
zh-CN: "API 服务器的 URL用于 web 前端连接"

View File

@@ -10,7 +10,7 @@ use easytier::{
use session::Session;
use storage::{Storage, StorageToken};
use crate::db::{Db, UserIdInDb};
use crate::db::Db;
#[derive(Debug)]
pub struct ClientManager {
@@ -86,21 +86,15 @@ impl ClientManager {
ret
}
pub fn get_session_by_machine_id(
&self,
user_id: UserIdInDb,
machine_id: &uuid::Uuid,
) -> Option<Arc<Session>> {
let c_url = self
.storage
.get_client_url_by_machine_id(user_id, machine_id)?;
pub fn get_session_by_machine_id(&self, machine_id: &uuid::Uuid) -> Option<Arc<Session>> {
let c_url = self.storage.get_client_url_by_machine_id(machine_id)?;
self.client_sessions
.get(&c_url)
.map(|item| item.value().clone())
}
pub async fn list_machine_by_user_id(&self, user_id: UserIdInDb) -> Vec<url::Url> {
self.storage.list_user_clients(user_id)
pub async fn list_machine_by_token(&self, token: String) -> Vec<url::Url> {
self.storage.list_token_clients(&token)
}
pub async fn get_heartbeat_requests(&self, client_url: &url::Url) -> Option<HeartbeatRequest> {
@@ -124,7 +118,6 @@ mod tests {
},
web_client::WebClient,
};
use sqlx::Executor;
use crate::{client_manager::ClientManager, db::Db};
@@ -134,14 +127,8 @@ mod tests {
let mut mgr = ClientManager::new(Db::memory_db().await);
mgr.serve(Box::new(listener)).await.unwrap();
mgr.db()
.inner()
.execute("INSERT INTO users (username, password) VALUES ('test', 'test')")
.await
.unwrap();
let connector = UdpTunnelConnector::new("udp://127.0.0.1:54333".parse().unwrap());
let _c = WebClient::new(connector, "test", "test");
let _c = WebClient::new(connector, "test");
wait_for_condition(
|| async { mgr.client_sessions.len() == 1 },

View File

@@ -1,6 +1,5 @@
use std::{fmt::Debug, str::FromStr as _, sync::Arc};
use anyhow::Context;
use easytier::{
common::scoped_task::ScopedTask,
proto::{
@@ -69,66 +68,6 @@ struct SessionRpcService {
data: SharedSessionData,
}
impl SessionRpcService {
async fn handle_heartbeat(
&self,
req: HeartbeatRequest,
) -> rpc_types::error::Result<HeartbeatResponse> {
let mut data = self.data.write().await;
let Ok(storage) = Storage::try_from(data.storage.clone()) else {
tracing::error!("Failed to get storage");
return Ok(HeartbeatResponse {});
};
let machine_id: uuid::Uuid =
req.machine_id
.clone()
.map(Into::into)
.ok_or(anyhow::anyhow!(
"Machine id is not set correctly, expect uuid but got: {:?}",
req.machine_id
))?;
let user_id = storage
.db()
.get_user_id_by_token(req.user_token.clone())
.await
.with_context(|| {
format!(
"Failed to get user id by token from db: {:?}",
req.user_token
)
})?
.ok_or(anyhow::anyhow!(
"User not found by token: {:?}",
req.user_token
))?;
if data.req.replace(req.clone()).is_none() {
assert!(data.storage_token.is_none());
data.storage_token = Some(StorageToken {
token: req.user_token.clone().into(),
client_url: data.client_url.clone(),
machine_id,
user_id,
});
}
let Ok(report_time) = chrono::DateTime::<chrono::Local>::from_str(&req.report_time) else {
tracing::error!("Failed to parse report time: {:?}", req.report_time);
return Ok(HeartbeatResponse {});
};
storage.update_client(
data.storage_token.as_ref().unwrap().clone(),
report_time.timestamp(),
);
let _ = data.notifier.send(req);
Ok(HeartbeatResponse {})
}
}
#[async_trait::async_trait]
impl WebServerService for SessionRpcService {
type Controller = BaseController;
@@ -138,13 +77,34 @@ impl WebServerService for SessionRpcService {
_: BaseController,
req: HeartbeatRequest,
) -> rpc_types::error::Result<HeartbeatResponse> {
let ret = self.handle_heartbeat(req).await;
if ret.is_err() {
tracing::warn!("Failed to handle heartbeat: {:?}", ret);
// sleep for a while to avoid client busy loop
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
let mut data = self.data.write().await;
if data.req.replace(req.clone()).is_none() {
assert!(data.storage_token.is_none());
data.storage_token = Some(StorageToken {
token: req.user_token.clone().into(),
client_url: data.client_url.clone(),
machine_id: req
.machine_id
.clone()
.map(Into::into)
.unwrap_or(uuid::Uuid::new_v4()),
});
}
ret
if let Ok(storage) = Storage::try_from(data.storage.clone()) {
let Ok(report_time) = chrono::DateTime::<chrono::Local>::from_str(&req.report_time)
else {
tracing::error!("Failed to parse report time: {:?}", req.report_time);
return Ok(HeartbeatResponse {});
};
storage.update_client(
data.storage_token.as_ref().unwrap().clone(),
report_time.timestamp(),
);
}
let _ = data.notifier.send(req);
Ok(HeartbeatResponse {})
}
}

View File

@@ -2,7 +2,7 @@ use std::sync::{Arc, Weak};
use dashmap::DashMap;
use crate::db::{Db, UserIdInDb};
use crate::db::Db;
// use this to maintain Storage
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
@@ -10,19 +10,21 @@ pub struct StorageToken {
pub token: String,
pub client_url: url::Url,
pub machine_id: uuid::Uuid,
pub user_id: UserIdInDb,
}
#[derive(Debug, Clone)]
struct ClientInfo {
storage_token: StorageToken,
client_url: url::Url,
machine_id: uuid::Uuid,
token: String,
report_time: i64,
}
#[derive(Debug)]
pub struct StorageInner {
// some map for indexing
user_clients_map: DashMap<UserIdInDb, DashMap<uuid::Uuid, ClientInfo>>,
token_clients_map: DashMap<String, DashMap<uuid::Uuid, ClientInfo>>,
machine_client_url_map: DashMap<uuid::Uuid, ClientInfo>,
pub db: Db,
}
@@ -41,7 +43,8 @@ impl TryFrom<WeakRefStorage> for Storage {
impl Storage {
pub fn new(db: Db) -> Self {
Storage(Arc::new(StorageInner {
user_clients_map: DashMap::new(),
token_clients_map: DashMap::new(),
machine_client_url_map: DashMap::new(),
db,
}))
}
@@ -51,22 +54,17 @@ impl Storage {
machine_id: &uuid::Uuid,
client_url: &url::Url,
) {
map.remove_if(&machine_id, |_, v| {
v.storage_token.client_url == *client_url
});
map.remove_if(&machine_id, |_, v| v.client_url == *client_url);
}
fn update_mid_to_client_info_map(
map: &DashMap<uuid::Uuid, ClientInfo>,
client_info: &ClientInfo,
) {
map.entry(client_info.storage_token.machine_id)
map.entry(client_info.machine_id)
.and_modify(|e| {
if e.report_time < client_info.report_time {
assert_eq!(
e.storage_token.machine_id,
client_info.storage_token.machine_id
);
assert_eq!(e.machine_id, client_info.machine_id);
*e = client_info.clone();
}
})
@@ -76,51 +74,53 @@ impl Storage {
pub fn update_client(&self, stoken: StorageToken, report_time: i64) {
let inner = self
.0
.user_clients_map
.entry(stoken.user_id)
.token_clients_map
.entry(stoken.token.clone())
.or_insert_with(DashMap::new);
let client_info = ClientInfo {
storage_token: stoken.clone(),
client_url: stoken.client_url.clone(),
machine_id: stoken.machine_id,
token: stoken.token.clone(),
report_time,
};
Self::update_mid_to_client_info_map(&inner, &client_info);
Self::update_mid_to_client_info_map(&self.0.machine_client_url_map, &client_info);
}
pub fn remove_client(&self, stoken: &StorageToken) {
self.0
.user_clients_map
.remove_if(&stoken.user_id, |_, set| {
Self::remove_mid_to_client_info_map(set, &stoken.machine_id, &stoken.client_url);
set.is_empty()
});
self.0.token_clients_map.remove_if(&stoken.token, |_, set| {
Self::remove_mid_to_client_info_map(set, &stoken.machine_id, &stoken.client_url);
set.is_empty()
});
Self::remove_mid_to_client_info_map(
&self.0.machine_client_url_map,
&stoken.machine_id,
&stoken.client_url,
);
}
pub fn weak_ref(&self) -> WeakRefStorage {
Arc::downgrade(&self.0)
}
pub fn get_client_url_by_machine_id(
&self,
user_id: UserIdInDb,
machine_id: &uuid::Uuid,
) -> Option<url::Url> {
self.0.user_clients_map.get(&user_id).and_then(|info_map| {
info_map
.get(machine_id)
.map(|info| info.storage_token.client_url.clone())
})
pub fn get_client_url_by_machine_id(&self, machine_id: &uuid::Uuid) -> Option<url::Url> {
self.0
.machine_client_url_map
.get(&machine_id)
.map(|info| info.client_url.clone())
}
pub fn list_user_clients(&self, user_id: UserIdInDb) -> Vec<url::Url> {
pub fn list_token_clients(&self, token: &str) -> Vec<url::Url> {
self.0
.user_clients_map
.get(&user_id)
.token_clients_map
.get(token)
.map(|info_map| {
info_map
.iter()
.map(|info| info.value().storage_token.client_url.clone())
.map(|info| info.value().client_url.clone())
.collect()
})
.unwrap_or_default()

View File

@@ -12,7 +12,7 @@ use sqlx::{migrate::MigrateDatabase as _, types::chrono, Sqlite, SqlitePool};
use crate::migrator;
pub type UserIdInDb = i32;
type UserIdInDb = i32;
pub enum ListNetworkProps {
All,

View File

@@ -5,16 +5,14 @@ extern crate rust_i18n;
use std::sync::Arc;
use clap::Parser;
use clap::{command, Parser};
use easytier::{
common::{
config::{ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, TomlConfigLoader},
constants::EASYTIER_VERSION,
error::Error,
},
tunnel::{
tcp::TcpTunnelListener, udp::UdpTunnelListener, websocket::WSTunnelListener, TunnelListener,
},
tunnel::{tcp::TcpTunnelListener, udp::UdpTunnelListener, TunnelListener},
utils::{init_logger, setup_panic_handler},
};
@@ -23,13 +21,10 @@ mod db;
mod migrator;
mod restful;
#[cfg(feature = "embed")]
mod web;
rust_i18n::i18n!("locales", fallback = "en");
#[derive(Parser, Debug)]
#[command(name = "easytier-web", author, version = EASYTIER_VERSION , about, long_about = None)]
#[command(name = "easytier-core", author, version = EASYTIER_VERSION , about, long_about = None)]
struct Cli {
#[arg(short, long, default_value = "et.db", help = t!("cli.db").to_string())]
db: String,
@@ -75,36 +70,14 @@ struct Cli {
help = t!("cli.api_server_port").to_string(),
)]
api_server_port: u16,
#[cfg(feature = "embed")]
#[arg(
long,
short='l',
help = t!("cli.web_server_port").to_string(),
)]
web_server_port: Option<u16>,
#[cfg(feature = "embed")]
#[arg(
long,
help = t!("cli.no_web").to_string(),
default_value = "false"
)]
no_web: bool,
#[cfg(feature = "embed")]
#[arg(
long,
help = t!("cli.api_host").to_string()
)]
api_host: Option<url::Url>,
}
pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> {
pub fn get_listener_by_url(
l: &url::Url,
) -> Result<Box<dyn TunnelListener>, Error> {
Ok(match l.scheme() {
"tcp" => Box::new(TcpTunnelListener::new(l.clone())),
"udp" => Box::new(UdpTunnelListener::new(l.clone())),
"ws" => Box::new(WSTunnelListener::new(l.clone())),
_ => {
return Err(Error::InvalidUrl(l.to_string()));
}
@@ -133,62 +106,20 @@ async fn main() {
let db = db::Db::new(cli.db).await.unwrap();
let listener = get_listener_by_url(
&format!(
"{}://0.0.0.0:{}",
cli.config_server_protocol, cli.config_server_port
)
.parse()
.unwrap(),
&format!("{}://0.0.0.0:{}", cli.config_server_protocol, cli.config_server_port).parse().unwrap(),
)
.unwrap();
let mut mgr = client_manager::ClientManager::new(db.clone());
mgr.serve(listener).await.unwrap();
let mgr = Arc::new(mgr);
#[cfg(feature = "embed")]
let (web_router_restful, web_router_static) = if cli.no_web {
(None, None)
} else {
let web_router = web::build_router(cli.api_host.clone());
if cli.web_server_port.is_none() || cli.web_server_port == Some(cli.api_server_port) {
(Some(web_router), None)
} else {
(None, Some(web_router))
}
};
#[cfg(not(feature = "embed"))]
let web_router_restful = None;
let _restful_server_tasks = restful::RestfulServer::new(
let mut restful_server = restful::RestfulServer::new(
format!("0.0.0.0:{}", cli.api_server_port).parse().unwrap(),
mgr.clone(),
db,
web_router_restful,
)
.await
.unwrap()
.start()
.await
.unwrap();
#[cfg(feature = "embed")]
let _web_server_task = if let Some(web_router) = web_router_static {
Some(
web::WebServer::new(
format!("0.0.0.0:{}", cli.web_server_port.unwrap_or(0))
.parse()
.unwrap(),
web_router,
)
.await
.unwrap()
.start()
.await
.unwrap(),
)
} else {
None
};
restful_server.start().await.unwrap();
tokio::signal::ctrl_c().await.unwrap();
}

View File

@@ -9,7 +9,7 @@ use axum::http::StatusCode;
use axum::routing::post;
use axum::{extract::State, routing::get, Json, Router};
use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer};
use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend};
use axum_login::{login_required, AuthManagerLayerBuilder, AuthzBackend};
use axum_messages::MessagesManagerLayer;
use easytier::common::config::ConfigLoader;
use easytier::common::scoped_task::ScopedTask;
@@ -24,26 +24,20 @@ use tower_sessions::Expiry;
use tower_sessions_sqlx_store::SqliteStore;
use users::{AuthSession, Backend};
use crate::client_manager::session::Session;
use crate::client_manager::storage::StorageToken;
use crate::client_manager::ClientManager;
use crate::db::Db;
/// Embed assets for web dashboard, build frontend first
#[cfg(feature = "embed")]
#[derive(rust_embed::RustEmbed, Clone)]
#[folder = "frontend/dist/"]
struct Assets;
pub struct RestfulServer {
bind_addr: SocketAddr,
client_mgr: Arc<ClientManager>,
db: Db,
// serve_task: Option<ScopedTask<()>>,
// delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>,
network_api: NetworkApi,
serve_task: Option<ScopedTask<()>>,
delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>,
web_router: Option<Router>,
network_api: NetworkApi,
}
type AppStateInner = Arc<ClientManager>;
@@ -93,7 +87,6 @@ impl RestfulServer {
bind_addr: SocketAddr,
client_mgr: Arc<ClientManager>,
db: Db,
web_router: Option<Router>,
) -> anyhow::Result<Self> {
assert!(client_mgr.is_running());
@@ -103,13 +96,23 @@ impl RestfulServer {
bind_addr,
client_mgr,
db,
// serve_task: None,
// delete_task: None,
serve_task: None,
delete_task: None,
network_api,
web_router,
})
}
async fn get_session_by_machine_id(
client_mgr: &ClientManager,
machine_id: &uuid::Uuid,
) -> Result<Arc<Session>, HttpHandleError> {
let Some(result) = client_mgr.get_session_by_machine_id(machine_id) else {
return Err((StatusCode::NOT_FOUND, other_error("No such session").into()));
};
Ok(result)
}
async fn handle_list_all_sessions(
auth_session: AuthSession,
State(client_mgr): AppState,
@@ -132,7 +135,9 @@ impl RestfulServer {
return Err((StatusCode::UNAUTHORIZED, other_error("No such user").into()));
};
let machines = client_mgr.list_machine_by_user_id(user.id().clone()).await;
let machines = client_mgr
.list_machine_by_token(user.tokens[0].clone())
.await;
Ok(GetSummaryJsonResp {
device_count: machines.len() as u32,
@@ -158,15 +163,7 @@ impl RestfulServer {
}
}
pub async fn start(
mut self,
) -> Result<
(
ScopedTask<()>,
ScopedTask<tower_sessions::session_store::Result<()>>,
),
anyhow::Error,
> {
pub async fn start(&mut self) -> Result<(), anyhow::Error> {
let listener = TcpListener::bind(self.bind_addr).await?;
// Session layer.
@@ -176,13 +173,14 @@ impl RestfulServer {
let session_store = SqliteStore::new(self.db.inner());
session_store.migrate().await?;
let delete_task: ScopedTask<tower_sessions::session_store::Result<()>> =
self.delete_task.replace(
tokio::task::spawn(
session_store
.clone()
.continuously_delete_expired(tokio::time::Duration::from_secs(60)),
)
.into();
.into(),
);
// Generate a cryptographic key to sign the session cookie.
let key = Key::generate();
@@ -221,18 +219,11 @@ impl RestfulServer {
.layer(tower_http::cors::CorsLayer::very_permissive())
.layer(compression_layer);
#[cfg(feature = "embed")]
let app = if let Some(web_router) = self.web_router.take() {
app.merge(web_router)
} else {
app
};
let serve_task: ScopedTask<()> = tokio::spawn(async move {
let task = tokio::spawn(async move {
axum::serve(listener, app).await.unwrap();
})
.into();
});
self.serve_task = Some(task.into());
Ok((serve_task, delete_task))
Ok(())
}
}

View File

@@ -5,6 +5,7 @@ use axum::http::StatusCode;
use axum::routing::{delete, post};
use axum::{extract::State, routing::get, Json, Router};
use axum_login::AuthUser;
use dashmap::DashSet;
use easytier::launcher::NetworkConfig;
use easytier::proto::common::Void;
use easytier::proto::rpc_types::controller::BaseController;
@@ -12,7 +13,7 @@ use easytier::proto::web::*;
use crate::client_manager::session::Session;
use crate::client_manager::ClientManager;
use crate::db::{ListNetworkProps, UserIdInDb};
use crate::db::ListNetworkProps;
use super::users::AuthSession;
use super::{
@@ -80,24 +81,12 @@ impl NetworkApi {
Self {}
}
fn get_user_id(auth_session: &AuthSession) -> Result<UserIdInDb, (StatusCode, Json<Error>)> {
let Some(user_id) = auth_session.user.as_ref().map(|x| x.id()) else {
return Err((
StatusCode::UNAUTHORIZED,
other_error(format!("No user id found")).into(),
));
};
Ok(user_id)
}
async fn get_session_by_machine_id(
auth_session: &AuthSession,
client_mgr: &ClientManager,
machine_id: &uuid::Uuid,
) -> Result<Arc<Session>, HttpHandleError> {
let user_id = Self::get_user_id(auth_session)?;
let Some(result) = client_mgr.get_session_by_machine_id(user_id, machine_id) else {
let Some(result) = client_mgr.get_session_by_machine_id(machine_id) else {
return Err((
StatusCode::NOT_FOUND,
other_error(format!("No such session: {}", machine_id)).into(),
@@ -300,13 +289,23 @@ impl NetworkApi {
auth_session: AuthSession,
State(client_mgr): AppState,
) -> Result<Json<ListMachineJsonResp>, HttpHandleError> {
let user_id = Self::get_user_id(&auth_session)?;
let tokens = auth_session
.user
.as_ref()
.map(|x| x.tokens.clone())
.unwrap_or_default();
let client_urls = client_mgr.list_machine_by_user_id(user_id).await;
let client_urls = DashSet::new();
for token in tokens {
let urls = client_mgr.list_machine_by_token(token).await;
for url in urls {
client_urls.insert(url);
}
}
let mut machines = vec![];
for item in client_urls.iter() {
let client_url = item.clone();
let client_url = item.key().clone();
let session = client_mgr.get_heartbeat_requests(&client_url).await;
machines.push(ListMachineItem {
client_url: Some(client_url),

View File

@@ -1,86 +0,0 @@
use axum::{
extract::State,
http::header,
response::{IntoResponse, Response},
routing, Router,
};
use axum_embed::ServeEmbed;
use easytier::common::scoped_task::ScopedTask;
use rust_embed::RustEmbed;
use std::net::SocketAddr;
use tokio::net::TcpListener;
/// Embed assets for web dashboard, build frontend first
#[derive(RustEmbed, Clone)]
#[folder = "frontend/dist/"]
struct Assets;
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct ApiMetaResponse {
api_host: String,
}
async fn handle_api_meta(State(api_host): State<url::Url>) -> impl IntoResponse {
Response::builder()
.header(
header::CONTENT_TYPE,
"application/javascript; charset=utf-8",
)
.header(header::CACHE_CONTROL, "no-cache, no-store, must-revalidate")
.header(header::PRAGMA, "no-cache")
.header(header::EXPIRES, "0")
.body(format!(
"window.apiMeta = {}",
serde_json::to_string(&ApiMetaResponse {
api_host: api_host.to_string()
})
.unwrap(),
))
.unwrap()
}
pub fn build_router(api_host: Option<url::Url>) -> Router {
let service = ServeEmbed::<Assets>::new();
let router = Router::new();
let router = if let Some(api_host) = api_host {
let sub_router = Router::new()
.route("/api_meta.js", routing::get(handle_api_meta))
.with_state(api_host);
router.merge(sub_router)
} else {
router
};
let router = router.fallback_service(service);
router
}
pub struct WebServer {
bind_addr: SocketAddr,
router: Router,
serve_task: Option<ScopedTask<()>>,
}
impl WebServer {
pub async fn new(bind_addr: SocketAddr, router: Router) -> anyhow::Result<Self> {
Ok(WebServer {
bind_addr,
router,
serve_task: None,
})
}
pub async fn start(self) -> Result<ScopedTask<()>, anyhow::Error> {
let listener = TcpListener::bind(self.bind_addr).await?;
let app = self.router;
let task = tokio::spawn(async move {
axum::serve(listener, app).await.unwrap();
})
.into();
Ok(task)
}
}

View File

@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/EasyTier/EasyTier"
repository = "https://github.com/EasyTier/EasyTier"
version = "2.3.1"
version = "2.2.3"
edition = "2021"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
@@ -129,7 +129,6 @@ clap = { version = "4.5.30", features = [
"unicode",
"derive",
"wrap_help",
"env",
] }
async-recursion = "1.0.5"
@@ -137,8 +136,7 @@ async-recursion = "1.0.5"
network-interface = "2.0"
# for ospf route
petgraph = "0.8.1"
hashbrown = "0.15.3"
petgraph = "0.7.1"
# for wireguard
boringtun = { package = "boringtun-easytier", version = "0.6.1", optional = true }
@@ -154,7 +152,7 @@ humansize = "2.1.3"
base64 = "0.22"
mimalloc = { version = "*", optional = true }
mimalloc-rust = { version = "0.2.1", optional = true }
# mips
atomic-shim = "0.2.0"
@@ -164,14 +162,8 @@ smoltcp = { version = "0.12.0", optional = true, default-features = false, featu
"medium-ip",
"proto-ipv4",
"proto-ipv6",
"proto-ipv4-fragmentation",
"fragmentation-buffer-size-8192",
"assembler-max-segment-count-16",
"reassembly-buffer-size-8192",
"reassembly-buffer-count-16",
"socket-tcp",
"socket-udp",
# "socket-tcp-cubic",
"socket-tcp-cubic",
"async",
] }
parking_lot = { version = "0.12.0", optional = true }
@@ -184,9 +176,9 @@ sys-locale = "0.3"
ringbuf = "0.4.5"
async-ringbuf = "0.3.1"
service-manager = { git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main" }
service-manager = {git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main"}
zstd = { version = "0.13" }
async-compression = { version = "0.4.17", default-features = false, features = ["zstd", "tokio"] }
kcp-sys = { git = "https://github.com/EasyTier/kcp-sys" }
@@ -195,29 +187,12 @@ prost-reflect = { version = "0.14.5", default-features = false, features = [
] }
# for http connector
http_req = { git = "https://github.com/EasyTier/http_req.git", default-features = false, features = [
"rust-tls",
] }
http_req = { git = "https://github.com/EasyTier/http_req.git", default-features = false, features = ["rust-tls"] }
# for dns connector
hickory-resolver = "0.25.2"
hickory-proto = "0.25.2"
hickory-resolver = "0.24.4"
# for magic dns
hickory-client = "0.25.2"
hickory-server = { version = "0.25.2", features = ["resolver"] }
derive_builder = "0.20.2"
humantime-serde = "1.1.1"
multimap = "0.10.0"
version-compare = "0.2.0"
jemallocator = { version = "0.5.4", optional = true }
jemalloc-ctl = { version = "0.5.4", optional = true }
jemalloc-sys = { version = "0.5.4", features = [
"stats",
"profiling",
"unprefixed_malloc_on_supported_platforms",
], optional = true }
bounded_join_set = "0.3.0"
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
machine-uid = "0.5.3"
@@ -227,10 +202,6 @@ netlink-sys = "0.8.7"
netlink-packet-route = "0.21.0"
netlink-packet-core = { version = "0.7.0" }
netlink-packet-utils = "0.5.2"
# for magic dns
resolv-conf = "0.7.3"
dbus = { version = "0.9.7", features = ["vendored"] }
which = "7.0.3"
[target.'cfg(windows)'.dependencies]
windows = { version = "0.52.0", features = [
@@ -241,7 +212,7 @@ windows = { version = "0.52.0", features = [
"Win32_System_Ole",
"Win32_Networking_WinSock",
"Win32_System_IO",
] }
]}
encoding = "0.2"
winreg = "0.52"
windows-service = "0.7.0"
@@ -251,28 +222,18 @@ tonic-build = "0.12"
globwalk = "0.8.1"
regex = "1"
prost-build = "0.13.2"
rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = [
"internal-namespace",
] }
rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = ["internal-namespace"] }
prost-reflect-build = { version = "0.14.0" }
[target.'cfg(windows)'.build-dependencies]
reqwest = { version = "0.12.12", features = ["blocking"] }
zip = "4.0.0"
# enable thunk-rs when compiling for x86_64 or i686 windows
[target.x86_64-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
[target.i686-pc-windows-msvc.build-dependencies]
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
zip = "0.6.6"
[dev-dependencies]
serial_test = "3.0.0"
rstest = "0.18.2"
futures-util = "0.3.30"
maplit = "1.0.2"
[target.'cfg(target_os = "linux")'.dev-dependencies]
defguard_wireguard_rs = "0.4.2"
@@ -294,7 +255,7 @@ full = [
mips = ["aes-gcm", "mimalloc", "wireguard", "tun", "smoltcp", "socks5"]
wireguard = ["dep:boringtun", "dep:ring"]
quic = ["dep:quinn", "dep:rustls", "dep:rcgen"]
mimalloc = ["dep:mimalloc"]
mimalloc = ["dep:mimalloc-rust"]
aes-gcm = ["dep:aes-gcm"]
tun = ["dep:tun"]
websocket = [
@@ -306,4 +267,3 @@ websocket = [
]
smoltcp = ["dep:smoltcp", "dep:parking_lot"]
socks5 = ["dep:smoltcp"]
jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl", "dep:jemalloc-sys"]

View File

@@ -71,8 +71,6 @@ impl WindowsBuild {
if target.contains("x86_64") {
println!("cargo:rustc-link-search=native=easytier/third_party/");
} else if target.contains("i686") {
println!("cargo:rustc-link-search=native=easytier/third_party/i686/");
} else if target.contains("aarch64") {
println!("cargo:rustc-link-search=native=easytier/third_party/arm64/");
}
@@ -127,15 +125,6 @@ fn check_locale() {
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
// enable thunk-rs when target os is windows and arch is x86_64 or i686
#[cfg(target_os = "windows")]
if !std::env::var("TARGET")
.unwrap_or_default()
.contains("aarch64")
{
thunk::thunk();
}
#[cfg(target_os = "windows")]
WindowsBuild::check_for_win();
@@ -146,7 +135,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
"src/proto/tests.proto",
"src/proto/cli.proto",
"src/proto/web.proto",
"src/proto/magic_dns.proto",
];
for proto_file in proto_files.iter().chain(proto_files_reflect.iter()) {

View File

@@ -11,8 +11,8 @@ core_clap:
完整URL--config-server udp://127.0.0.1:22020/admin
仅用户名:--config-server admin将使用官方的服务器
config_file:
en: "path to the config file, NOTE: the options set by cmdline args will override options in config file"
zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项"
en: "path to the config file, NOTE: if this is set, all other options will be ignored"
zh-CN: "配置文件路径,注意:如果设置了这个选项,其他所有选项都将被忽略"
network_name:
en: "network name to identify this vpn network"
zh-CN: "用于标识此VPN网络的网络名称"
@@ -149,15 +149,6 @@ core_clap:
disable_kcp_input:
en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved."
zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。"
port_forward:
en: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple."
zh-CN: "将本地端口转发到虚拟网络中的远程端口。例如udp://0.0.0.0:12345/10.126.126.1:23456表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。"
accept_dns:
en: "if true, enable magic dns. with magic dns, you can access other nodes with a domain name, e.g.: <hostname>.et.net. magic dns will modify your system dns settings, enable it carefully."
zh-CN: "如果为true则启用魔法DNS。使用魔法DNS您可以使用域名访问其他节点例如<hostname>.et.net。魔法DNS将修改您的系统DNS设置请谨慎启用。"
private_mode:
en: "if true, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node."
zh-CN: "如果为true则不允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转"
core_app:
panic_backtrace_save:

View File

@@ -1,10 +1,5 @@
use std::io::{Read, Write};
use dashmap::DashMap;
use std::cell::RefCell;
use zstd::stream::read::Decoder;
use zstd::stream::write::Encoder;
use zstd::zstd_safe::{CCtx, DCtx};
use async_compression::tokio::write::{ZstdDecoder, ZstdEncoder};
use tokio::io::AsyncWriteExt;
use zerocopy::{AsBytes as _, FromBytes as _};
@@ -34,20 +29,17 @@ impl DefaultCompressor {
data: &[u8],
compress_algo: CompressorAlgo,
) -> Result<Vec<u8>, Error> {
match compress_algo {
let buf = match compress_algo {
CompressorAlgo::ZstdDefault => {
let ret = CTX_MAP.with(|map_cell| {
let map = map_cell.borrow();
let mut ctx_entry = map.entry(compress_algo).or_default();
let writer = Vec::new();
let mut o = Encoder::with_context(writer, ctx_entry.value_mut());
o.write_all(data)?;
o.finish()
});
Ok(ret?)
let mut o = ZstdEncoder::new(Vec::new());
o.write_all(data).await?;
o.shutdown().await?;
o.into_inner()
}
CompressorAlgo::None => Ok(data.to_vec()),
}
CompressorAlgo::None => data.to_vec(),
};
Ok(buf)
}
pub async fn decompress_raw(
@@ -55,17 +47,17 @@ impl DefaultCompressor {
data: &[u8],
compress_algo: CompressorAlgo,
) -> Result<Vec<u8>, Error> {
match compress_algo {
CompressorAlgo::ZstdDefault => DCTX_MAP.with(|map_cell| {
let map = map_cell.borrow();
let mut ctx_entry = map.entry(compress_algo).or_default();
let mut decoder = Decoder::with_context(data, ctx_entry.value_mut());
let mut output = Vec::new();
decoder.read_to_end(&mut output)?;
Ok(output)
}),
CompressorAlgo::None => Ok(data.to_vec()),
}
let buf = match compress_algo {
CompressorAlgo::ZstdDefault => {
let mut o = ZstdDecoder::new(Vec::new());
o.write_all(data).await?;
o.shutdown().await?;
o.into_inner()
}
CompressorAlgo::None => data.to_vec(),
};
Ok(buf)
}
}
@@ -154,11 +146,6 @@ impl Compressor for DefaultCompressor {
}
}
thread_local! {
static CTX_MAP: RefCell<DashMap<CompressorAlgo, CCtx<'static>>> = RefCell::new(DashMap::new());
static DCTX_MAP: RefCell<DashMap<CompressorAlgo, DCtx<'static>>> = RefCell::new(DashMap::new());
}
#[cfg(test)]
pub mod tests {
use super::*;
@@ -171,21 +158,10 @@ pub mod tests {
let compressor = DefaultCompressor {};
println!(
"Uncompressed packet: {:?}, len: {}",
packet,
packet.payload_len()
);
compressor
.compress(&mut packet, CompressorAlgo::ZstdDefault)
.await
.unwrap();
println!(
"Compressed packet: {:?}, len: {}",
packet,
packet.payload_len()
);
assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), true);
compressor.decompress(&mut packet).await.unwrap();

View File

@@ -7,10 +7,7 @@ use std::{
use anyhow::Context;
use serde::{Deserialize, Serialize};
use crate::{
proto::common::{CompressionAlgoPb, PortForwardConfigPb, SocketType},
tunnel::generate_digest_from_str,
};
use crate::{proto::common::CompressionAlgoPb, tunnel::generate_digest_from_str};
pub type Flags = crate::proto::common::FlagsInConfig;
@@ -36,8 +33,6 @@ pub fn gen_default_flags() -> Flags {
enable_kcp_proxy: false,
disable_kcp_input: false,
disable_relay_kcp: true,
accept_dns: false,
private_mode: false,
}
}
@@ -78,7 +73,7 @@ pub trait ConfigLoader: Send + Sync {
fn get_peers(&self) -> Vec<PeerConfig>;
fn set_peers(&self, peers: Vec<PeerConfig>);
fn get_listeners(&self) -> Option<Vec<url::Url>>;
fn get_listeners(&self) -> Vec<url::Url>;
fn set_listeners(&self, listeners: Vec<url::Url>);
fn get_mapped_listeners(&self) -> Vec<url::Url>;
@@ -102,9 +97,6 @@ pub trait ConfigLoader: Send + Sync {
fn get_socks5_portal(&self) -> Option<url::Url>;
fn set_socks5_portal(&self, addr: Option<url::Url>);
fn get_port_forwards(&self) -> Vec<PortForwardConfig>;
fn set_port_forwards(&self, forwards: Vec<PortForwardConfig>);
fn dump(&self) -> String;
}
@@ -188,41 +180,6 @@ pub struct VpnPortalConfig {
pub wireguard_listen: SocketAddr,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct PortForwardConfig {
pub bind_addr: SocketAddr,
pub dst_addr: SocketAddr,
pub proto: String,
}
impl From<PortForwardConfigPb> for PortForwardConfig {
fn from(config: PortForwardConfigPb) -> Self {
PortForwardConfig {
bind_addr: config.bind_addr.unwrap_or_default().into(),
dst_addr: config.dst_addr.unwrap_or_default().into(),
proto: match SocketType::try_from(config.socket_type) {
Ok(SocketType::Tcp) => "tcp".to_string(),
Ok(SocketType::Udp) => "udp".to_string(),
_ => "tcp".to_string(),
},
}
}
}
impl Into<PortForwardConfigPb> for PortForwardConfig {
fn into(self) -> PortForwardConfigPb {
PortForwardConfigPb {
bind_addr: Some(self.bind_addr.into()),
dst_addr: Some(self.dst_addr.into()),
socket_type: match self.proto.to_lowercase().as_str() {
"tcp" => SocketType::Tcp as i32,
"udp" => SocketType::Udp as i32,
_ => SocketType::Tcp as i32,
},
}
}
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct Config {
netns: Option<String>,
@@ -250,8 +207,6 @@ struct Config {
socks5_proxy: Option<url::Url>,
port_forward: Option<Vec<PortForwardConfig>>,
flags: Option<serde_json::Map<String, serde_json::Value>>,
#[serde(skip)]
@@ -276,23 +231,20 @@ impl TomlConfigLoader {
config.flags_struct = Some(Self::gen_flags(config.flags.clone().unwrap_or_default()));
let config = TomlConfigLoader {
Ok(TomlConfigLoader {
config: Arc::new(Mutex::new(config)),
};
let old_ns = config.get_network_identity();
config.set_network_identity(NetworkIdentity::new(
old_ns.network_name,
old_ns.network_secret.unwrap_or_default(),
));
Ok(config)
})
}
pub fn new(config_path: &PathBuf) -> Result<Self, anyhow::Error> {
let config_str = std::fs::read_to_string(config_path)
.with_context(|| format!("failed to read config file: {:?}", config_path))?;
let ret = Self::new_from_str(&config_str)?;
let old_ns = ret.get_network_identity();
ret.set_network_identity(NetworkIdentity::new(
old_ns.network_name,
old_ns.network_secret.unwrap_or_default(),
));
Ok(ret)
}
@@ -515,8 +467,13 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().peer = Some(peers);
}
fn get_listeners(&self) -> Option<Vec<url::Url>> {
self.config.lock().unwrap().listeners.clone()
fn get_listeners(&self) -> Vec<url::Url> {
self.config
.lock()
.unwrap()
.listeners
.clone()
.unwrap_or_default()
}
fn set_listeners(&self, listeners: Vec<url::Url>) {
@@ -577,35 +534,6 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().exit_nodes = Some(nodes);
}
fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>> {
self.config.lock().unwrap().routes.clone()
}
fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>) {
self.config.lock().unwrap().routes = routes;
}
fn get_socks5_portal(&self) -> Option<url::Url> {
self.config.lock().unwrap().socks5_proxy.clone()
}
fn set_socks5_portal(&self, addr: Option<url::Url>) {
self.config.lock().unwrap().socks5_proxy = addr;
}
fn get_port_forwards(&self) -> Vec<PortForwardConfig> {
self.config
.lock()
.unwrap()
.port_forward
.clone()
.unwrap_or_default()
}
fn set_port_forwards(&self, forwards: Vec<PortForwardConfig>) {
self.config.lock().unwrap().port_forward = Some(forwards);
}
fn dump(&self) -> String {
let default_flags_json = serde_json::to_string(&gen_default_flags()).unwrap();
let default_flags_hashmap =
@@ -630,6 +558,22 @@ impl ConfigLoader for TomlConfigLoader {
config.flags = Some(flag_map);
toml::to_string_pretty(&config).unwrap()
}
fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>> {
self.config.lock().unwrap().routes.clone()
}
fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>) {
self.config.lock().unwrap().routes = routes;
}
fn get_socks5_portal(&self) -> Option<url::Url> {
self.config.lock().unwrap().socks5_proxy.clone()
}
fn set_socks5_portal(&self, addr: Option<url::Url>) {
self.config.lock().unwrap().socks5_proxy = addr;
}
}
#[cfg(test)]
@@ -670,11 +614,6 @@ dir = "/tmp/easytier"
[console_logger]
level = "warn"
[[port_forward]]
bind_addr = "0.0.0.0:11011"
dst_addr = "192.168.94.33:11011"
proto = "tcp"
"#;
let ret = TomlConfigLoader::new_from_str(config_str);
if let Err(e) = &ret {
@@ -695,14 +634,6 @@ proto = "tcp"
.collect::<Vec<String>>()
);
assert_eq!(
vec![PortForwardConfig {
bind_addr: "0.0.0.0:11011".parse().unwrap(),
dst_addr: "192.168.94.33:11011".parse().unwrap(),
proto: "tcp".to_string(),
}],
ret.get_port_forwards()
);
println!("{}", ret.dump());
}
}

View File

@@ -1,134 +0,0 @@
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use anyhow::Context;
use hickory_proto::runtime::TokioRuntimeProvider;
use hickory_proto::xfer::Protocol;
use hickory_resolver::config::{LookupIpStrategy, NameServerConfig, ResolverConfig, ResolverOpts};
use hickory_resolver::name_server::{GenericConnector, TokioConnectionProvider};
use hickory_resolver::system_conf::read_system_conf;
use hickory_resolver::{Resolver, TokioResolver};
use once_cell::sync::Lazy;
use tokio::net::lookup_host;
use super::error::Error;
pub fn get_default_resolver_config() -> ResolverConfig {
let mut default_resolve_config = ResolverConfig::new();
default_resolve_config.add_name_server(NameServerConfig::new(
"223.5.5.5:53".parse().unwrap(),
Protocol::Udp,
));
default_resolve_config.add_name_server(NameServerConfig::new(
"180.184.1.1:53".parse().unwrap(),
Protocol::Udp,
));
default_resolve_config
}
pub static ALLOW_USE_SYSTEM_DNS_RESOLVER: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(true));
pub static RESOLVER: Lazy<Arc<Resolver<GenericConnector<TokioRuntimeProvider>>>> =
Lazy::new(|| {
let system_cfg = read_system_conf();
let mut cfg = get_default_resolver_config();
let mut opt = ResolverOpts::default();
if let Ok(s) = system_cfg {
for ns in s.0.name_servers() {
cfg.add_name_server(ns.clone());
}
opt = s.1;
}
opt.ip_strategy = LookupIpStrategy::Ipv4AndIpv6;
let builder = TokioResolver::builder_with_config(cfg, TokioConnectionProvider::default())
.with_options(opt);
Arc::new(builder.build())
});
pub async fn resolve_txt_record(domain_name: &str) -> Result<String, Error> {
let r = RESOLVER.clone();
let response = r.txt_lookup(domain_name).await.with_context(|| {
format!(
"txt_lookup failed, domain_name: {}",
domain_name.to_string()
)
})?;
let txt_record = response.iter().next().with_context(|| {
format!(
"no txt record found, domain_name: {}",
domain_name.to_string()
)
})?;
let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
tracing::info!(?txt_data, ?domain_name, "get txt record");
Ok(txt_data.to_string())
}
pub async fn socket_addrs(
url: &url::Url,
default_port_number: impl Fn() -> Option<u16>,
) -> Result<Vec<SocketAddr>, Error> {
let host = url.host_str().ok_or(Error::InvalidUrl(url.to_string()))?;
let port = url
.port()
.or_else(default_port_number)
.ok_or(Error::InvalidUrl(url.to_string()))?;
// if host is an ip address, return it directly
if let Ok(ip) = host.parse::<std::net::IpAddr>() {
return Ok(vec![SocketAddr::new(ip, port)]);
}
if ALLOW_USE_SYSTEM_DNS_RESOLVER.load(std::sync::atomic::Ordering::Relaxed) {
let socket_addr = format!("{}:{}", host, port);
match lookup_host(socket_addr).await {
Ok(a) => {
let a = a.collect();
tracing::debug!(?a, "system dns lookup done");
return Ok(a);
}
Err(e) => {
tracing::error!(?e, "system dns lookup failed");
}
}
}
// use hickory_resolver
let ret = RESOLVER.lookup_ip(host).await.with_context(|| {
format!(
"hickory dns lookup_ip failed, host: {}, port: {}",
host, port
)
})?;
Ok(ret
.iter()
.map(|ip| SocketAddr::new(ip, port))
.collect::<Vec<_>>())
}
#[cfg(test)]
mod tests {
use crate::defer;
use super::*;
#[tokio::test]
async fn test_socket_addrs() {
let url = url::Url::parse("tcp://public.easytier.cn:80").unwrap();
let addrs = socket_addrs(&url, || Some(80)).await.unwrap();
assert_eq!(2, addrs.len(), "addrs: {:?}", addrs);
println!("addrs: {:?}", addrs);
ALLOW_USE_SYSTEM_DNS_RESOLVER.store(false, std::sync::atomic::Ordering::Relaxed);
defer!(
ALLOW_USE_SYSTEM_DNS_RESOLVER.store(true, std::sync::atomic::Ordering::Relaxed);
);
let addrs = socket_addrs(&url, || Some(80)).await.unwrap();
assert_eq!(2, addrs.len(), "addrs: {:?}", addrs);
println!("addrs2: {:?}", addrs);
}
}

View File

@@ -5,7 +5,7 @@ use std::{
};
use crate::proto::cli::PeerConnInfo;
use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb};
use crate::proto::common::PeerFeatureFlag;
use crossbeam::atomic::AtomicCell;
use super::{
@@ -42,8 +42,6 @@ pub enum GlobalCtxEvent {
DhcpIpv4Changed(Option<cidr::Ipv4Inet>, Option<cidr::Ipv4Inet>), // (old, new)
DhcpIpv4Conflicted(Option<cidr::Ipv4Inet>),
PortForwardAdded(PortForwardConfigPb),
}
pub type EventBus = tokio::sync::broadcast::Sender<GlobalCtxEvent>;
@@ -61,11 +59,11 @@ pub struct GlobalCtx {
cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>,
cached_proxy_cidrs: AtomicCell<Option<Vec<cidr::IpCidr>>>,
ip_collector: Mutex<Option<Arc<IPCollector>>>,
ip_collector: Arc<IPCollector>,
hostname: Mutex<String>,
hostname: String,
stun_info_collection: Mutex<Arc<dyn StunInfoCollectorTrait>>,
stun_info_collection: Box<dyn StunInfoCollectorTrait>,
running_listeners: Mutex<Vec<url::Url>>,
@@ -97,7 +95,7 @@ impl GlobalCtx {
let net_ns = NetNS::new(config_fs.get_netns());
let hostname = config_fs.get_hostname();
let (event_bus, _) = tokio::sync::broadcast::channel(8);
let (event_bus, _) = tokio::sync::broadcast::channel(1024);
let stun_info_collection = Arc::new(StunInfoCollector::new_with_default_servers());
@@ -120,14 +118,11 @@ impl GlobalCtx {
cached_ipv4: AtomicCell::new(None),
cached_proxy_cidrs: AtomicCell::new(None),
ip_collector: Mutex::new(Some(Arc::new(IPCollector::new(
net_ns,
stun_info_collection.clone(),
)))),
ip_collector: Arc::new(IPCollector::new(net_ns, stun_info_collection.clone())),
hostname: Mutex::new(hostname),
hostname,
stun_info_collection: Mutex::new(stun_info_collection),
stun_info_collection: Box::new(stun_info_collection),
running_listeners: Mutex::new(Vec::new()),
@@ -144,13 +139,10 @@ impl GlobalCtx {
}
pub fn issue_event(&self, event: GlobalCtxEvent) {
if let Err(e) = self.event_bus.send(event.clone()) {
tracing::warn!(
"Failed to send event: {:?}, error: {:?}, receiver count: {}",
event,
e,
self.event_bus.receiver_count()
);
if self.event_bus.receiver_count() != 0 {
self.event_bus.send(event).unwrap();
} else {
tracing::warn!("No subscriber for event: {:?}", event);
}
}
@@ -218,30 +210,26 @@ impl GlobalCtx {
}
pub fn get_ip_collector(&self) -> Arc<IPCollector> {
self.ip_collector.lock().unwrap().as_ref().unwrap().clone()
self.ip_collector.clone()
}
pub fn get_hostname(&self) -> String {
return self.hostname.lock().unwrap().clone();
return self.hostname.clone();
}
pub fn set_hostname(&self, hostname: String) {
*self.hostname.lock().unwrap() = hostname;
}
pub fn get_stun_info_collector(&self) -> Arc<dyn StunInfoCollectorTrait> {
self.stun_info_collection.lock().unwrap().clone()
pub fn get_stun_info_collector(&self) -> impl StunInfoCollectorTrait + '_ {
self.stun_info_collection.as_ref()
}
pub fn replace_stun_info_collector(&self, collector: Box<dyn StunInfoCollectorTrait>) {
let arc_collector: Arc<dyn StunInfoCollectorTrait> = Arc::new(collector);
*self.stun_info_collection.lock().unwrap() = arc_collector.clone();
// rebuild the ip collector
*self.ip_collector.lock().unwrap() = Some(Arc::new(IPCollector::new(
self.net_ns.clone(),
arc_collector,
)));
// force replace the stun_info_collection without mut and drop the old one
let ptr = &self.stun_info_collection as *const Box<dyn StunInfoCollectorTrait>;
let ptr = ptr as *mut Box<dyn StunInfoCollectorTrait>;
unsafe {
std::ptr::drop_in_place(ptr);
#[allow(invalid_reference_casting)]
std::ptr::write(ptr, collector);
}
}
pub fn get_running_listeners(&self) -> Vec<url::Url> {
@@ -307,10 +295,7 @@ impl GlobalCtx {
#[cfg(test)]
pub mod tests {
use crate::{
common::{config::TomlConfigLoader, new_peer_id, stun::MockStunInfoCollector},
proto::common::NatType,
};
use crate::common::{config::TomlConfigLoader, new_peer_id};
use super::*;
@@ -350,12 +335,7 @@ pub mod tests {
let config_fs = TomlConfigLoader::default();
config_fs.set_inst_name(format!("test_{}", config_fs.get_id()));
config_fs.set_network_identity(network_identy.unwrap_or(NetworkIdentity::default()));
let ctx = Arc::new(GlobalCtx::new(config_fs));
ctx.replace_stun_info_collector(Box::new(MockStunInfoCollector {
udp_nat_type: NatType::Unknown,
}));
ctx
std::sync::Arc::new(GlobalCtx::new(config_fs))
}
pub fn get_mock_global_ctx() -> ArcGlobalCtx {

View File

@@ -12,15 +12,13 @@ impl IfConfiguerTrait for MacIfConfiger {
name: &str,
address: Ipv4Addr,
cidr_prefix: u8,
cost: Option<i32>,
) -> Result<(), Error> {
run_shell_cmd(
format!(
"route -n add {} -netmask {} -interface {} -hopcount {}",
"route -n add {} -netmask {} -interface {} -hopcount 7",
address,
cidr_to_subnet_mask(cidr_prefix),
name,
cost.unwrap_or(7)
name
)
.as_str(),
)

View File

@@ -21,7 +21,6 @@ pub trait IfConfiguerTrait: Send + Sync {
_name: &str,
_address: Ipv4Addr,
_cidr_prefix: u8,
_cost: Option<i32>,
) -> Result<(), Error> {
Ok(())
}
@@ -126,6 +125,3 @@ pub type IfConfiger = windows::WindowsIfConfiger;
target_os = "freebsd",
)))]
pub type IfConfiger = DummyIfConfiger;
#[cfg(target_os = "windows")]
pub use windows::RegistryManager;

View File

@@ -350,7 +350,6 @@ impl IfConfiguerTrait for NetlinkIfConfiger {
name: &str,
address: Ipv4Addr,
cidr_prefix: u8,
cost: Option<i32>,
) -> Result<(), Error> {
let mut message = RouteMessage::default();
@@ -360,9 +359,7 @@ impl IfConfiguerTrait for NetlinkIfConfiger {
message.header.kind = RouteType::Unicast;
message.header.address_family = AddressFamily::Inet;
// metric
message
.attributes
.push(RouteAttribute::Priority(cost.unwrap_or(65535) as u32));
message.attributes.push(RouteAttribute::Priority(65535));
// output interface
message
.attributes
@@ -553,7 +550,7 @@ mod tests {
ifcfg.set_link_status(DUMMY_IFACE_NAME, true).await.unwrap();
ifcfg
.add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24, None)
.add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24)
.await
.unwrap();

View File

@@ -1,10 +1,6 @@
use std::{io, net::Ipv4Addr};
use std::net::Ipv4Addr;
use async_trait::async_trait;
use winreg::{
enums::{HKEY_LOCAL_MACHINE, KEY_READ, KEY_WRITE},
RegKey,
};
use super::{cidr_to_subnet_mask, run_shell_cmd, Error, IfConfiguerTrait};
@@ -63,18 +59,16 @@ impl IfConfiguerTrait for WindowsIfConfiger {
name: &str,
address: Ipv4Addr,
cidr_prefix: u8,
cost: Option<i32>,
) -> Result<(), Error> {
let Some(idx) = Self::get_interface_index(name) else {
return Err(Error::NotFound);
};
run_shell_cmd(
format!(
"route ADD {} MASK {} 10.1.1.1 IF {} METRIC {}",
"route ADD {} MASK {} 10.1.1.1 IF {} METRIC 9000",
address,
cidr_to_subnet_mask(cidr_prefix),
idx,
cost.unwrap_or(9000)
idx
)
.as_str(),
)
@@ -170,220 +164,3 @@ impl IfConfiguerTrait for WindowsIfConfiger {
.await
}
}
pub struct RegistryManager;
impl RegistryManager {
pub const IPV4_TCPIP_INTERFACE_PREFIX: &str =
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\";
pub const IPV6_TCPIP_INTERFACE_PREFIX: &str =
r"SYSTEM\CurrentControlSet\Services\Tcpip6\Parameters\Interfaces\";
pub const NETBT_INTERFACE_PREFIX: &str =
r"SYSTEM\CurrentControlSet\Services\NetBT\Parameters\Interfaces\Tcpip_";
pub fn reg_delete_obsoleted_items(dev_name: &str) -> io::Result<()> {
use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey};
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
let profiles_key = hklm.open_subkey_with_flags(
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles",
KEY_ALL_ACCESS,
)?;
let unmanaged_key = hklm.open_subkey_with_flags(
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Signatures\\Unmanaged",
KEY_ALL_ACCESS,
)?;
// collect subkeys to delete
let mut keys_to_delete = Vec::new();
let mut keys_to_delete_unmanaged = Vec::new();
for subkey_name in profiles_key.enum_keys().filter_map(Result::ok) {
let subkey = profiles_key.open_subkey(&subkey_name)?;
// check if ProfileName contains "et"
match subkey.get_value::<String, _>("ProfileName") {
Ok(profile_name) => {
if profile_name.contains("et_")
|| (!dev_name.is_empty() && dev_name == profile_name)
{
keys_to_delete.push(subkey_name);
}
}
Err(e) => {
tracing::error!(
"Failed to read ProfileName for subkey {}: {}",
subkey_name,
e
);
}
}
}
for subkey_name in unmanaged_key.enum_keys().filter_map(Result::ok) {
let subkey = unmanaged_key.open_subkey(&subkey_name)?;
// check if ProfileName contains "et"
match subkey.get_value::<String, _>("Description") {
Ok(profile_name) => {
if profile_name.contains("et_")
|| (!dev_name.is_empty() && dev_name == profile_name)
{
keys_to_delete_unmanaged.push(subkey_name);
}
}
Err(e) => {
tracing::error!(
"Failed to read ProfileName for subkey {}: {}",
subkey_name,
e
);
}
}
}
// delete collected subkeys
if !keys_to_delete.is_empty() {
for subkey_name in keys_to_delete {
match profiles_key.delete_subkey_all(&subkey_name) {
Ok(_) => tracing::trace!("Successfully deleted subkey: {}", subkey_name),
Err(e) => tracing::error!("Failed to delete subkey {}: {}", subkey_name, e),
}
}
}
if !keys_to_delete_unmanaged.is_empty() {
for subkey_name in keys_to_delete_unmanaged {
match unmanaged_key.delete_subkey_all(&subkey_name) {
Ok(_) => tracing::trace!("Successfully deleted subkey: {}", subkey_name),
Err(e) => tracing::error!("Failed to delete subkey {}: {}", subkey_name, e),
}
}
}
Ok(())
}
pub fn reg_change_catrgory_in_profile(dev_name: &str) -> io::Result<()> {
use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey};
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
let profiles_key = hklm.open_subkey_with_flags(
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles",
KEY_ALL_ACCESS,
)?;
for subkey_name in profiles_key.enum_keys().filter_map(Result::ok) {
let subkey = profiles_key.open_subkey_with_flags(&subkey_name, KEY_ALL_ACCESS)?;
match subkey.get_value::<String, _>("ProfileName") {
Ok(profile_name) => {
if !dev_name.is_empty() && dev_name == profile_name {
match subkey.set_value("Category", &1u32) {
Ok(_) => tracing::trace!("Successfully set Category in registry"),
Err(e) => tracing::error!("Failed to set Category in registry: {}", e),
}
}
}
Err(e) => {
tracing::error!(
"Failed to read ProfileName for subkey {}: {}",
subkey_name,
e
);
}
}
}
Ok(())
}
// 根据接口名称查找 GUID
pub fn find_interface_guid(interface_name: &str) -> io::Result<String> {
// 注册表路径:所有网络接口的根目录
let network_key_path =
r"SYSTEM\CurrentControlSet\Control\Network\{4D36E972-E325-11CE-BFC1-08002BE10318}";
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
let network_key = hklm.open_subkey_with_flags(network_key_path, KEY_READ)?;
// 遍历该路径下的所有 GUID 子键
for guid in network_key.enum_keys().map_while(Result::ok) {
if let Ok(guid_key) = network_key.open_subkey_with_flags(&guid, KEY_READ) {
// 检查 Connection/Name 是否匹配目标接口名
if let Ok(conn_key) = guid_key.open_subkey_with_flags("Connection", KEY_READ) {
if let Ok(name) = conn_key.get_value::<String, _>("Name") {
if name == interface_name {
return Ok(guid);
}
}
}
}
}
// 如果没有找到对应的接口
Err(io::Error::new(
io::ErrorKind::NotFound,
"Interface not found",
))
}
// 打开注册表键
pub fn open_interface_key(interface_guid: &str, prefix: &str) -> io::Result<RegKey> {
let path = format!(r"{}{}", prefix, interface_guid);
let hkey_local_machine = RegKey::predef(HKEY_LOCAL_MACHINE);
hkey_local_machine.open_subkey_with_flags(&path, KEY_WRITE)
}
// 禁用动态 DNS 更新
// disableDynamicUpdates sets the appropriate registry values to prevent the
// Windows DHCP client from sending dynamic DNS updates for our interface to
// AD domain controllers.
pub fn disable_dynamic_updates(interface_guid: &str) -> io::Result<()> {
let prefixes = [
Self::IPV4_TCPIP_INTERFACE_PREFIX,
Self::IPV6_TCPIP_INTERFACE_PREFIX,
];
for prefix in &prefixes {
let key = match Self::open_interface_key(interface_guid, prefix) {
Ok(k) => k,
Err(e) => {
// 模拟 mute-key-not-found-if-closing 行为
if matches!(e.kind(), io::ErrorKind::NotFound) {
continue;
} else {
return Err(e);
}
}
};
key.set_value("RegistrationEnabled", &0u32)?;
key.set_value("DisableDynamicUpdate", &1u32)?;
key.set_value("MaxNumberOfAddressesToRegister", &0u32)?;
}
Ok(())
}
// 设置单个 DWORD 值到指定的注册表路径下
fn set_single_dword(
interface_guid: &str,
prefix: &str,
value_name: &str,
data: u32,
) -> io::Result<()> {
let key = match Self::open_interface_key(interface_guid, prefix) {
Ok(k) => k,
Err(e) => {
// 模拟 muteKeyNotFoundIfClosing 行为:忽略 Key Not Found 错误
return if matches!(e.kind(), io::ErrorKind::NotFound) {
Ok(())
} else {
Err(e)
};
}
};
key.set_value(value_name, &data)?;
Ok(())
}
// 禁用 NetBIOS 名称解析请求
pub fn disable_netbios(interface_guid: &str) -> io::Result<()> {
Self::set_single_dword(
interface_guid,
Self::NETBT_INTERFACE_PREFIX,
"NetbiosOptions",
2,
)
}
}

View File

@@ -4,7 +4,6 @@ use std::{
io::Write as _,
sync::{Arc, Mutex},
};
use time::util::refresh_tz;
use tokio::{task::JoinSet, time::timeout};
use tracing::Instrument;
@@ -12,7 +11,6 @@ pub mod compressor;
pub mod config;
pub mod constants;
pub mod defer;
pub mod dns;
pub mod error;
pub mod global_ctx;
pub mod ifcfg;
@@ -25,7 +23,9 @@ pub mod stun_codec_ext;
pub fn get_logger_timer<F: time::formatting::Formattable>(
format: F,
) -> tracing_subscriber::fmt::time::OffsetTime<F> {
refresh_tz();
unsafe {
time::util::local_offset::set_soundness(time::util::local_offset::Soundness::Unsound)
};
let local_offset = time::UtcOffset::current_local_offset()
.unwrap_or(time::UtcOffset::from_whole_seconds(0).unwrap());
tracing_subscriber::fmt::time::OffsetTime::new(local_offset, format)
@@ -108,9 +108,6 @@ pub fn get_machine_id() -> uuid::Uuid {
))]
let gen_mid = machine_uid::get()
.map(|x| {
if x.is_empty() {
return uuid::Uuid::new_v4();
}
let mut b = [0u8; 16];
crate::tunnel::generate_digest_from_str("", x.as_str(), &mut b);
uuid::Uuid::from_bytes(b)

View File

@@ -179,16 +179,18 @@ impl IPCollector {
Self::do_collect_local_ip_addrs(self.net_ns.clone()).await;
let net_ns = self.net_ns.clone();
let stun_info_collector = self.stun_info_collector.clone();
task.spawn(async move {
loop {
let ip_addrs = Self::do_collect_local_ip_addrs(net_ns.clone()).await;
*cached_ip_list.write().await = ip_addrs;
tokio::time::sleep(std::time::Duration::from_secs(CACHED_IP_LIST_TIMEOUT_SEC))
.await;
}
});
let cached_ip_list = self.cached_ip_list.clone();
task.spawn(async move {
let mut last_fetch_iface_time = std::time::Instant::now();
loop {
if last_fetch_iface_time.elapsed().as_secs() > CACHED_IP_LIST_TIMEOUT_SEC {
let ifaces = Self::do_collect_local_ip_addrs(net_ns.clone()).await;
*cached_ip_list.write().await = ifaces;
last_fetch_iface_time = std::time::Instant::now();
}
let stun_info = stun_info_collector.get_stun_info();
for ip in stun_info.public_ip.iter() {
let Ok(ip_addr) = ip.parse::<IpAddr>() else {
@@ -197,20 +199,14 @@ impl IPCollector {
match ip_addr {
IpAddr::V4(v) => {
cached_ip_list.write().await.public_ipv4.replace(v.into());
cached_ip_list.write().await.public_ipv4 = Some(v.into())
}
IpAddr::V6(v) => {
cached_ip_list.write().await.public_ipv6.replace(v.into());
cached_ip_list.write().await.public_ipv6 = Some(v.into())
}
}
}
tracing::debug!(
"got public ip: {:?}, {:?}",
cached_ip_list.read().await.public_ipv4,
cached_ip_list.read().await.public_ipv6
);
let sleep_sec = if !cached_ip_list.read().await.public_ipv4.is_none() {
CACHED_IP_LIST_TIMEOUT_SEC
} else {
@@ -221,10 +217,10 @@ impl IPCollector {
});
}
self.cached_ip_list.read().await.deref().clone()
return self.cached_ip_list.read().await.deref().clone();
}
pub async fn collect_interfaces(net_ns: NetNS, filter: bool) -> Vec<NetworkInterface> {
pub async fn collect_interfaces(net_ns: NetNS) -> Vec<NetworkInterface> {
let _g = net_ns.guard();
let ifaces = pnet::datalink::interfaces();
let mut ret = vec![];
@@ -233,7 +229,7 @@ impl IPCollector {
iface: iface.clone(),
};
if filter && !f.filter_iface().await {
if !f.filter_iface().await {
continue;
}
@@ -247,36 +243,21 @@ impl IPCollector {
async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse {
let mut ret = GetIpListResponse::default();
let ifaces = Self::collect_interfaces(net_ns.clone(), true).await;
let ifaces = Self::collect_interfaces(net_ns.clone()).await;
let _g = net_ns.guard();
for iface in ifaces {
for ip in iface.ips {
let ip: std::net::IpAddr = ip.ip();
if ip.is_loopback() || ip.is_multicast() {
continue;
}
match ip {
std::net::IpAddr::V4(v4) => {
if ip.is_loopback() || ip.is_multicast() {
continue;
}
ret.interface_ipv4s.push(v4.into());
}
_ => {}
}
}
}
let ifaces = Self::collect_interfaces(net_ns.clone(), false).await;
let _g = net_ns.guard();
for iface in ifaces {
for ip in iface.ips {
let ip: std::net::IpAddr = ip.ip();
match ip {
std::net::IpAddr::V6(v6) => {
if v6.is_multicast() || v6.is_loopback() || v6.is_unicast_link_local() {
continue;
}
ret.interface_ipv6s.push(v6.into());
}
_ => {}
}
}
}

View File

@@ -8,6 +8,8 @@ use crate::proto::common::{NatType, StunInfo};
use anyhow::Context;
use chrono::Local;
use crossbeam::atomic::AtomicCell;
use hickory_resolver::config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts};
use hickory_resolver::TokioAsyncResolver;
use rand::seq::IteratorRandom;
use tokio::net::{lookup_host, UdpSocket};
use tokio::sync::{broadcast, Mutex};
@@ -20,9 +22,45 @@ use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder};
use crate::common::error::Error;
use super::dns::resolve_txt_record;
use super::stun_codec_ext::*;
pub fn get_default_resolver_config() -> ResolverConfig {
let mut default_resolve_config = ResolverConfig::new();
default_resolve_config.add_name_server(NameServerConfig::new(
"223.5.5.5:53".parse().unwrap(),
Protocol::Udp,
));
default_resolve_config.add_name_server(NameServerConfig::new(
"180.184.1.1:53".parse().unwrap(),
Protocol::Udp,
));
default_resolve_config
}
pub async fn resolve_txt_record(
domain_name: &str,
resolver: &TokioAsyncResolver,
) -> Result<String, Error> {
let response = resolver.txt_lookup(domain_name).await.with_context(|| {
format!(
"txt_lookup failed, domain_name: {}",
domain_name.to_string()
)
})?;
let txt_record = response.iter().next().with_context(|| {
format!(
"no txt record found, domain_name: {}",
domain_name.to_string()
)
})?;
let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
tracing::info!(?txt_data, ?domain_name, "get txt record");
Ok(txt_data.to_string())
}
struct HostResolverIter {
hostnames: Vec<String>,
ips: Vec<SocketAddr>,
@@ -41,7 +79,10 @@ impl HostResolverIter {
}
async fn get_txt_record(domain_name: &str) -> Result<Vec<String>, Error> {
let txt_data = resolve_txt_record(domain_name).await?;
let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap_or(
TokioAsyncResolver::tokio(get_default_resolver_config(), ResolverOpts::default()),
);
let txt_data = resolve_txt_record(domain_name, &resolver).await?;
Ok(txt_data.split(" ").map(|x| x.to_string()).collect())
}
@@ -761,10 +802,7 @@ impl StunInfoCollector {
async fn get_public_ipv6(servers: &Vec<String>) -> Option<Ipv6Addr> {
let mut ips = HostResolverIter::new(servers.to_vec(), 10, true);
while let Some(ip) = ips.next().await {
let Ok(udp_socket) = UdpSocket::bind(format!("[::]:0")).await else {
break;
};
let udp = Arc::new(udp_socket);
let udp = Arc::new(UdpSocket::bind(format!("[::]:0")).await.unwrap());
let ret = StunClientBuilder::new(udp.clone())
.new_stun_client(ip)
.bind_request(false, false)
@@ -890,7 +928,7 @@ impl StunInfoCollectorTrait for MockStunInfoCollector {
last_update_time: std::time::Instant::now().elapsed().as_secs() as i64,
min_port: 100,
max_port: 200,
public_ip: vec!["127.0.0.1".to_string(), "::1".to_string()],
public_ip: vec!["127.0.0.1".to_string()],
}
}

View File

@@ -12,31 +12,28 @@ use std::{
};
use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait, PeerId},
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
peers::{
peer_conn::PeerConnId,
peer_manager::PeerManager,
peer_rpc::PeerRpcManager,
peer_manager::PeerManager, peer_rpc::PeerRpcManager,
peer_rpc_service::DirectConnectorManagerRpcServer,
peer_task::{PeerTaskLauncher, PeerTaskManager},
},
proto::{
peer_rpc::{
DirectConnectorRpc, DirectConnectorRpcClientFactory, DirectConnectorRpcServer,
GetIpListRequest, GetIpListResponse, SendV6HolePunchPacketRequest,
GetIpListRequest, GetIpListResponse,
},
rpc_types::controller::BaseController,
},
tunnel::{udp::UdpTunnelConnector, IpVersion},
};
use crate::proto::cli::PeerConnInfo;
use anyhow::Context;
use rand::Rng;
use tokio::{net::UdpSocket, task::JoinSet, time::timeout};
use tokio::{task::JoinSet, time::timeout};
use tracing::Instrument;
use url::Host;
use super::{create_connector_by_url, udp_hole_punch};
use super::create_connector_by_url;
pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1;
pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300;
@@ -79,7 +76,7 @@ impl PeerManagerForDirectConnector for PeerManager {
struct DstBlackListItem(PeerId, String);
#[derive(Hash, Eq, PartialEq, Clone)]
struct DstListenerUrlBlackListItem(PeerId, String);
struct DstListenerUrlBlackListItem(PeerId, url::Url);
struct DirectConnectorManagerData {
global_ctx: ArcGlobalCtx,
@@ -95,114 +92,95 @@ impl DirectConnectorManagerData {
dst_listener_blacklist: timedmap::TimedMap::new(),
}
}
}
async fn remote_send_v6_hole_punch_packet(
&self,
dst_peer_id: PeerId,
local_socket: &UdpSocket,
remote_url: &url::Url,
) -> Result<(), Error> {
let global_ctx = self.peer_manager.get_global_ctx();
let listener_port = remote_url.port().ok_or(anyhow::anyhow!(
"failed to parse port from remote url: {}",
remote_url
))?;
let connector_ip = global_ctx
.get_stun_info_collector()
.get_stun_info()
.public_ip
.iter()
.find(|x| x.contains(":"))
.ok_or(anyhow::anyhow!(
"failed to get public ipv6 address from stun info"
))?
.parse::<std::net::Ipv6Addr>()
.with_context(|| {
format!(
"failed to parse public ipv6 address from stun info: {:?}",
global_ctx.get_stun_info_collector().get_stun_info()
)
})?;
let connector_addr = SocketAddr::new(
std::net::IpAddr::V6(connector_ip),
local_socket.local_addr()?.port(),
);
impl std::fmt::Debug for DirectConnectorManagerData {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DirectConnectorManagerData")
.field("peer_manager", &self.peer_manager)
.finish()
}
}
let rpc_stub = self
pub struct DirectConnectorManager {
global_ctx: ArcGlobalCtx,
data: Arc<DirectConnectorManagerData>,
tasks: JoinSet<()>,
}
impl DirectConnectorManager {
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
Self {
global_ctx: global_ctx.clone(),
data: Arc::new(DirectConnectorManagerData::new(global_ctx, peer_manager)),
tasks: JoinSet::new(),
}
}
pub fn run(&mut self) {
if self.global_ctx.get_flags().disable_p2p {
return;
}
self.run_as_server();
self.run_as_client();
}
pub fn run_as_server(&mut self) {
self.data
.peer_manager
.get_peer_rpc_mgr()
.rpc_client()
.scoped_client::<DirectConnectorRpcClientFactory<BaseController>>(
self.peer_manager.my_peer_id(),
dst_peer_id,
global_ctx.get_network_name(),
);
rpc_stub
.send_v6_hole_punch_packet(
BaseController::default(),
SendV6HolePunchPacketRequest {
listener_port: listener_port as u32,
connector_addr: Some(connector_addr.into()),
},
)
.await
.with_context(|| {
format!(
"do rpc, send v6 hole punch packet to peer {} at {}",
dst_peer_id, remote_url
)
})?;
Ok(())
.rpc_server()
.registry()
.register(
DirectConnectorRpcServer::new(DirectConnectorManagerRpcServer::new(
self.global_ctx.clone(),
)),
&self.data.global_ctx.get_network_name(),
);
}
async fn connect_to_public_ipv6(
&self,
pub fn run_as_client(&mut self) {
let data = self.data.clone();
let my_peer_id = self.data.peer_manager.my_peer_id();
self.tasks.spawn(
async move {
loop {
let peers = data.peer_manager.list_peers().await;
let mut tasks = JoinSet::new();
for peer_id in peers {
if peer_id == my_peer_id
|| data.peer_manager.has_directly_connected_conn(peer_id)
{
continue;
}
tasks.spawn(Self::do_try_direct_connect(data.clone(), peer_id));
}
while let Some(task_ret) = tasks.join_next().await {
tracing::debug!(?task_ret, ?my_peer_id, "direct connect task ret");
}
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
}
}
.instrument(
tracing::info_span!("direct_connector_client", my_id = ?self.global_ctx.id),
),
);
}
async fn do_try_connect_to_ip(
data: Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
remote_url: &url::Url,
) -> Result<(PeerId, PeerConnId), Error> {
let local_socket = Arc::new(
UdpSocket::bind("[::]:0")
.await
.with_context(|| format!("failed to bind local socket for {}", remote_url))?,
);
// ask remote to send v6 hole punch packet
// and no matter what the result is, continue to connect
let _ = self
.remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, &remote_url)
.await;
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
let remote_addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
&remote_url,
"udp",
IpVersion::V6,
addr: String,
) -> Result<(), Error> {
let connector = create_connector_by_url(&addr, &data.global_ctx).await?;
let (peer_id, conn_id) = timeout(
std::time::Duration::from_secs(3),
data.peer_manager.try_direct_connect(connector),
)
.await?;
let ret = udp_connector
.try_connect_with_socket(local_socket, remote_addr)
.await?;
// NOTICE: must add as directly connected tunnel
self.peer_manager.add_direct_tunnel(ret).await
}
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
let connector = create_connector_by_url(&addr, &self.global_ctx, IpVersion::Both).await?;
let remote_url = connector.remote_url();
let (peer_id, conn_id) =
if remote_url.scheme() == "udp" && matches!(remote_url.host(), Some(Host::Ipv6(_))) {
self.connect_to_public_ipv6(dst_peer_id, &remote_url)
.await?
} else {
timeout(
std::time::Duration::from_secs(3),
self.peer_manager.try_direct_connect(connector),
)
.await??
};
.await??;
if peer_id != dst_peer_id && !TESTING.load(Ordering::Relaxed) {
tracing::info!(
@@ -211,7 +189,7 @@ impl DirectConnectorManagerData {
dst_peer_id,
peer_id
);
self.peer_manager
data.peer_manager
.get_peer_map()
.close_peer_conn(peer_id, &conn_id)
.await?;
@@ -221,44 +199,21 @@ impl DirectConnectorManagerData {
Ok(())
}
#[tracing::instrument(skip(self))]
#[tracing::instrument]
async fn try_connect_to_ip(
self: Arc<DirectConnectorManagerData>,
data: Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
addr: String,
) -> Result<(), Error> {
let mut rand_gen = rand::rngs::OsRng::default();
let backoff_ms = vec![1000, 2000, 4000];
let backoff_ms = vec![1000, 2000];
let mut backoff_idx = 0;
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start");
self.dst_listener_blacklist.cleanup();
if self
.dst_listener_blacklist
.contains(&DstListenerUrlBlackListItem(
dst_peer_id.clone(),
addr.clone(),
))
{
return Err(Error::UrlInBlacklist);
}
loop {
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
return Ok(());
}
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start one round");
let ret = self.do_try_connect_to_ip(dst_peer_id, addr.clone()).await;
let ret = Self::do_try_connect_to_ip(data.clone(), dst_peer_id, addr.clone()).await;
tracing::debug!(?ret, ?dst_peer_id, ?addr, "try_connect_to_ip return");
if ret.is_ok() {
return Ok(());
}
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
return Ok(());
if matches!(ret, Err(Error::UrlInBlacklist) | Ok(_)) {
return ret;
}
if backoff_idx < backoff_ms.len() {
@@ -274,29 +229,49 @@ impl DirectConnectorManagerData {
backoff_idx += 1;
continue;
} else {
self.dst_listener_blacklist.insert(
DstListenerUrlBlackListItem(dst_peer_id.clone(), addr),
(),
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
);
return ret;
}
}
}
fn spawn_direct_connect_task(
self: &Arc<DirectConnectorManagerData>,
#[tracing::instrument]
async fn do_try_direct_connect_internal(
data: Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
ip_list: &GetIpListResponse,
listener: &url::Url,
tasks: &mut JoinSet<Result<(), Error>>,
) {
let Ok(mut addrs) = listener.socket_addrs(|| None) else {
tracing::error!(?listener, "failed to parse socket address from listener");
return;
};
let listener_host = addrs.pop();
tracing::info!(?listener_host, ?listener, "try direct connect to peer");
ip_list: GetIpListResponse,
) -> Result<(), Error> {
data.dst_listener_blacklist.cleanup();
let enable_ipv6 = data.global_ctx.get_flags().enable_ipv6;
let available_listeners = ip_list
.listeners
.into_iter()
.map(Into::<url::Url>::into)
.filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None })
.filter(|l| l.port().is_some() && l.host().is_some())
.filter(|l| {
!data
.dst_listener_blacklist
.contains(&DstListenerUrlBlackListItem(dst_peer_id.clone(), l.clone()))
})
.filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_)))
.collect::<Vec<_>>();
tracing::debug!(?available_listeners, "got available listeners");
if available_listeners.is_empty() {
return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into());
}
// if have default listener, use it first
let listener = available_listeners
.iter()
.find(|l| l.scheme() == data.global_ctx.get_flags().default_protocol)
.unwrap_or(available_listeners.get(0).unwrap());
let mut tasks = bounded_join_set::JoinSet::new(2);
let listener_host = listener.socket_addrs(|| None).unwrap().pop();
match listener_host {
Some(SocketAddr::V4(s_addr)) => {
if s_addr.ip().is_unspecified() {
@@ -308,7 +283,7 @@ impl DirectConnectorManagerData {
let mut addr = (*listener).clone();
if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
data.clone(),
dst_peer_id.clone(),
addr.to_string(),
));
@@ -323,7 +298,7 @@ impl DirectConnectorManagerData {
});
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
data.clone(),
dst_peer_id.clone(),
listener.to_string(),
));
@@ -354,7 +329,7 @@ impl DirectConnectorManagerData {
.is_ok()
{
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
data.clone(),
dst_peer_id.clone(),
addr.to_string(),
));
@@ -369,7 +344,7 @@ impl DirectConnectorManagerData {
});
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
tasks.spawn(Self::try_connect_to_ip(
self.clone(),
data.clone(),
dst_peer_id.clone(),
listener.to_string(),
));
@@ -379,230 +354,64 @@ impl DirectConnectorManagerData {
tracing::error!(?p, ?listener, "failed to parse ip version from listener");
}
}
}
#[tracing::instrument(skip(self))]
async fn do_try_direct_connect_internal(
self: &Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
ip_list: GetIpListResponse,
) -> Result<(), Error> {
let enable_ipv6 = self.global_ctx.get_flags().enable_ipv6;
let available_listeners = ip_list
.listeners
.clone()
.into_iter()
.map(Into::<url::Url>::into)
.filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None })
.filter(|l| l.port().is_some() && l.host().is_some())
.filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_)))
.collect::<Vec<_>>();
tracing::debug!(?available_listeners, "got available listeners");
if available_listeners.is_empty() {
return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into());
}
let default_protocol = self.global_ctx.get_flags().default_protocol;
// sort available listeners, default protocol has the highest priority, udp is second, others just random
// highest priority is in the last
let mut available_listeners = available_listeners;
available_listeners.sort_by_key(|l| {
let scheme = l.scheme();
if scheme == default_protocol {
3
} else if scheme == "udp" {
2
} else {
1
}
});
while !available_listeners.is_empty() {
let mut tasks = JoinSet::new();
let mut listener_list = vec![];
let cur_scheme = available_listeners.last().unwrap().scheme().to_owned();
while let Some(listener) = available_listeners.last() {
if listener.scheme() != cur_scheme {
let mut has_succ = false;
while let Some(ret) = tasks.join_next().await {
match ret {
Ok(Ok(_)) => {
has_succ = true;
tracing::info!(
?dst_peer_id,
?listener,
"try direct connect to peer success"
);
break;
}
tracing::debug!("try direct connect to peer with listener: {}", listener);
self.spawn_direct_connect_task(
dst_peer_id.clone(),
&ip_list,
&listener,
&mut tasks,
);
listener_list.push(listener.clone().to_string());
available_listeners.pop();
Ok(Err(e)) => {
tracing::info!(?e, "try direct connect to peer failed");
}
Err(e) => {
tracing::error!(?e, "try direct connect to peer task join failed");
}
}
}
let ret = tasks.join_all().await;
tracing::debug!(
?ret,
?dst_peer_id,
?cur_scheme,
?listener_list,
"all tasks finished for current scheme"
if !has_succ {
data.dst_listener_blacklist.insert(
DstListenerUrlBlackListItem(dst_peer_id.clone(), listener.clone()),
(),
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
);
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
tracing::info!(
"direct connect to peer {} success, has direct conn",
dst_peer_id
);
return Ok(());
}
}
Ok(())
}
#[tracing::instrument(skip(self))]
#[tracing::instrument]
async fn do_try_direct_connect(
self: Arc<DirectConnectorManagerData>,
data: Arc<DirectConnectorManagerData>,
dst_peer_id: PeerId,
) -> Result<(), Error> {
let mut backoff =
udp_hole_punch::BackOff::new(vec![1000, 2000, 2000, 5000, 5000, 10000, 30000, 60000]);
loop {
let peer_manager = self.peer_manager.clone();
tracing::debug!("try direct connect to peer: {}", dst_peer_id);
let peer_manager = data.peer_manager.clone();
tracing::debug!("try direct connect to peer: {}", dst_peer_id);
let rpc_stub = peer_manager
.get_peer_rpc_mgr()
.rpc_client()
.scoped_client::<DirectConnectorRpcClientFactory<BaseController>>(
let rpc_stub = peer_manager
.get_peer_rpc_mgr()
.rpc_client()
.scoped_client::<DirectConnectorRpcClientFactory<BaseController>>(
peer_manager.my_peer_id(),
dst_peer_id,
self.global_ctx.get_network_name(),
data.global_ctx.get_network_name(),
);
let ip_list = rpc_stub
.get_ip_list(BaseController::default(), GetIpListRequest {})
.await
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?;
tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list");
let ret = self
.do_try_direct_connect_internal(dst_peer_id, ip_list)
.await;
tracing::info!(?ret, ?dst_peer_id, "do_try_direct_connect return");
if peer_manager.has_directly_connected_conn(dst_peer_id) {
tracing::info!(
"direct connect to peer {} success, has direct conn",
dst_peer_id
);
return Ok(());
}
tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await;
}
}
}
impl std::fmt::Debug for DirectConnectorManagerData {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DirectConnectorManagerData")
.field("peer_manager", &self.peer_manager)
.finish()
}
}
pub struct DirectConnectorManager {
global_ctx: ArcGlobalCtx,
data: Arc<DirectConnectorManagerData>,
client: PeerTaskManager<DirectConnectorLauncher>,
tasks: JoinSet<()>,
}
#[derive(Clone)]
struct DirectConnectorLauncher(Arc<DirectConnectorManagerData>);
#[async_trait::async_trait]
impl PeerTaskLauncher for DirectConnectorLauncher {
type Data = Arc<DirectConnectorManagerData>;
type CollectPeerItem = PeerId;
type TaskRet = ();
fn new_data(&self, _peer_mgr: Arc<PeerManager>) -> Self::Data {
self.0.clone()
}
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
let my_peer_id = data.peer_manager.my_peer_id();
data.peer_manager
.list_peers()
let ip_list = rpc_stub
.get_ip_list(BaseController::default(), GetIpListRequest {})
.await
.into_iter()
.filter(|peer_id| {
*peer_id != my_peer_id && !data.peer_manager.has_directly_connected_conn(*peer_id)
})
.collect()
}
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?;
async fn launch_task(
&self,
data: &Self::Data,
item: Self::CollectPeerItem,
) -> tokio::task::JoinHandle<Result<Self::TaskRet, anyhow::Error>> {
let data = data.clone();
tokio::spawn(async move { data.do_try_direct_connect(item).await.map_err(Into::into) })
}
tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list");
async fn all_task_done(&self, _data: &Self::Data) {}
fn loop_interval_ms(&self) -> u64 {
5000
}
}
impl DirectConnectorManager {
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
let data = Arc::new(DirectConnectorManagerData::new(
global_ctx.clone(),
peer_manager.clone(),
));
let client = PeerTaskManager::new(DirectConnectorLauncher(data.clone()), peer_manager);
Self {
global_ctx,
data,
client,
tasks: JoinSet::new(),
}
}
pub fn run(&mut self) {
if self.global_ctx.get_flags().disable_p2p {
return;
}
self.run_as_server();
self.run_as_client();
}
pub fn run_as_server(&mut self) {
self.data
.peer_manager
.get_peer_rpc_mgr()
.rpc_server()
.registry()
.register(
DirectConnectorRpcServer::new(DirectConnectorManagerRpcServer::new(
self.global_ctx.clone(),
)),
&self.data.global_ctx.get_network_name(),
);
}
pub fn run_as_client(&mut self) {
self.client.start();
Self::do_try_direct_connect_internal(data, dst_peer_id, ip_list).await
}
}
@@ -681,13 +490,6 @@ mod tests {
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
p_c.get_global_ctx()
.get_ip_collector()
.collect_ip_addrs()
.await;
tokio::time::sleep(std::time::Duration::from_secs(4)).await;
let mut dm_a = DirectConnectorManager::new(p_a.get_global_ctx(), p_a.clone());
let mut dm_c = DirectConnectorManager::new(p_c.get_global_ctx(), p_c.clone());
@@ -722,7 +524,6 @@ mod tests {
#[tokio::test]
async fn direct_connector_scheme_blacklist() {
TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
let p_a = create_mock_peer_manager().await;
let data = Arc::new(DirectConnectorManagerData::new(
p_a.get_global_ctx(),
@@ -737,7 +538,7 @@ mod tests {
.interface_ipv4s
.push("127.0.0.1".parse::<std::net::Ipv4Addr>().unwrap().into());
data.do_try_direct_connect_internal(1, ip_list.clone())
DirectConnectorManager::do_try_direct_connect_internal(data.clone(), 1, ip_list.clone())
.await
.unwrap();

View File

@@ -2,15 +2,19 @@ use std::{net::SocketAddr, sync::Arc};
use crate::{
common::{
dns::{resolve_txt_record, RESOLVER},
error::Error,
global_ctx::ArcGlobalCtx,
stun::{get_default_resolver_config, resolve_txt_record},
},
tunnel::{IpVersion, Tunnel, TunnelConnector, TunnelError, PROTO_PORT_OFFSET},
};
use anyhow::Context;
use dashmap::DashSet;
use hickory_resolver::proto::rr::rdata::SRV;
use hickory_resolver::{
config::{ResolverConfig, ResolverOpts},
proto::rr::rdata::SRV,
TokioAsyncResolver,
};
use rand::{seq::SliceRandom, Rng as _};
use crate::proto::common::TunnelInfo;
@@ -39,6 +43,9 @@ pub struct DNSTunnelConnector {
bind_addrs: Vec<SocketAddr>,
global_ctx: ArcGlobalCtx,
ip_version: IpVersion,
default_resolve_config: ResolverConfig,
default_resolve_opts: ResolverOpts,
}
impl DNSTunnelConnector {
@@ -48,6 +55,9 @@ impl DNSTunnelConnector {
bind_addrs: Vec::new(),
global_ctx,
ip_version: IpVersion::Both,
default_resolve_config: get_default_resolver_config(),
default_resolve_opts: ResolverOpts::default(),
}
}
@@ -56,7 +66,12 @@ impl DNSTunnelConnector {
&self,
domain_name: &str,
) -> Result<Box<dyn TunnelConnector>, Error> {
let txt_data = resolve_txt_record(domain_name)
let resolver =
TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio(
self.default_resolve_config.clone(),
self.default_resolve_opts.clone(),
));
let txt_data = resolve_txt_record(domain_name, &resolver)
.await
.with_context(|| format!("resolve txt record failed, domain_name: {}", domain_name))?;
@@ -76,8 +91,8 @@ impl DNSTunnelConnector {
)
})?;
let connector =
create_connector_by_url(url.as_str(), &self.global_ctx, self.ip_version).await?;
let mut connector = create_connector_by_url(url.as_str(), &self.global_ctx).await?;
connector.set_ip_version(self.ip_version);
Ok(connector)
}
@@ -111,6 +126,12 @@ impl DNSTunnelConnector {
) -> Result<Box<dyn TunnelConnector>, Error> {
tracing::info!("handle_srv_record: {}", domain_name);
let resolver =
TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio(
self.default_resolve_config.clone(),
self.default_resolve_opts.clone(),
));
let srv_domains = PROTO_PORT_OFFSET
.iter()
.map(|(p, _)| (format!("_easytier._{}.{}", p, domain_name), *p)) // _easytier._udp.{domain_name}
@@ -120,7 +141,7 @@ impl DNSTunnelConnector {
let srv_lookup_tasks = srv_domains
.iter()
.map(|(srv_domain, protocol)| {
let resolver = RESOLVER.clone();
let resolver = resolver.clone();
let responses = responses.clone();
async move {
let response = resolver.srv_lookup(srv_domain).await.with_context(|| {
@@ -158,8 +179,8 @@ impl DNSTunnelConnector {
)
})?;
let connector =
create_connector_by_url(url.as_str(), &self.global_ctx, self.ip_version).await?;
let mut connector = create_connector_by_url(url.as_str(), &self.global_ctx).await?;
connector.set_ip_version(self.ip_version);
Ok(connector)
}
}
@@ -221,18 +242,8 @@ mod tests {
let url = "txt://txt.easytier.cn";
let global_ctx = get_mock_global_ctx();
let mut connector = DNSTunnelConnector::new(url.parse().unwrap(), global_ctx);
connector.set_ip_version(IpVersion::V4);
for _ in 0..5 {
match connector.connect().await {
Ok(ret) => {
println!("{:?}", ret.info());
return;
}
Err(e) => {
println!("{:?}", e);
}
}
}
let ret = connector.connect().await.unwrap();
println!("{:?}", ret.info());
}
#[tokio::test]
@@ -240,17 +251,7 @@ mod tests {
let url = "srv://easytier.cn";
let global_ctx = get_mock_global_ctx();
let mut connector = DNSTunnelConnector::new(url.parse().unwrap(), global_ctx);
connector.set_ip_version(IpVersion::V4);
for _ in 0..5 {
match connector.connect().await {
Ok(ret) => {
println!("{:?}", ret.info());
return;
}
Err(e) => {
println!("{:?}", e);
}
}
}
let ret = connector.connect().await.unwrap();
println!("{:?}", ret.info());
}
}

View File

@@ -92,24 +92,14 @@ impl HttpTunnelConnector {
if !query.is_empty() {
tracing::info!("try to create connector by url: {}", query[0]);
self.redirect_type = HttpRedirectType::RedirectToQuery;
return create_connector_by_url(
&query[0].to_string(),
&self.global_ctx,
self.ip_version,
)
.await;
return create_connector_by_url(&query[0].to_string(), &self.global_ctx).await;
} else if let Some(new_url) = url_str
.strip_prefix(format!("{}://", url.scheme()).as_str())
.and_then(|x| Url::parse(x).ok())
{
// stripe the scheme and create connector by url
self.redirect_type = HttpRedirectType::RedirectToUrl;
return create_connector_by_url(
new_url.as_str(),
&self.global_ctx,
self.ip_version,
)
.await;
return create_connector_by_url(new_url.as_str(), &self.global_ctx).await;
}
return Err(Error::InvalidUrl(format!(
"no valid connector url found in url: {}",
@@ -117,8 +107,7 @@ impl HttpTunnelConnector {
)));
} else {
self.redirect_type = HttpRedirectType::RedirectToUrl;
return create_connector_by_url(new_url.as_str(), &self.global_ctx, self.ip_version)
.await;
return create_connector_by_url(new_url.as_str(), &self.global_ctx).await;
}
}
@@ -148,7 +137,7 @@ impl HttpTunnelConnector {
continue;
}
self.redirect_type = HttpRedirectType::BodyUrls;
return create_connector_by_url(line, &self.global_ctx, self.ip_version).await;
return create_connector_by_url(line, &self.global_ctx).await;
}
Err(Error::InvalidUrl(format!(

View File

@@ -3,10 +3,7 @@ use std::{collections::BTreeSet, sync::Arc};
use anyhow::Context;
use dashmap::{DashMap, DashSet};
use tokio::{
sync::{
broadcast::{error::RecvError, Receiver},
mpsc, Mutex,
},
sync::{broadcast::Receiver, mpsc, Mutex},
task::JoinSet,
time::timeout,
};
@@ -109,7 +106,7 @@ impl ManualConnectorManager {
}
pub async fn add_connector_by_url(&self, url: &str) -> Result<(), Error> {
self.add_connector(create_connector_by_url(url, &self.global_ctx, IpVersion::Both).await?);
self.add_connector(create_connector_by_url(url, &self.global_ctx).await?);
Ok(())
}
@@ -182,37 +179,8 @@ impl ManualConnectorManager {
mut event_recv: Receiver<GlobalCtxEvent>,
) {
loop {
match event_recv.recv().await {
Ok(event) => {
Self::handle_event(&event, &data).await;
}
Err(RecvError::Lagged(n)) => {
tracing::warn!("event_recv lagged: {}, rebuild alive conn list", n);
event_recv = event_recv.resubscribe();
data.alive_conn_urls.clear();
for x in data
.peer_manager
.get_peer_map()
.get_alive_conns()
.iter()
.map(|x| {
x.tunnel
.clone()
.unwrap_or_default()
.remote_addr
.unwrap_or_default()
.to_string()
})
{
data.alive_conn_urls.insert(x);
}
continue;
}
Err(RecvError::Closed) => {
tracing::warn!("event_recv closed, exit");
break;
}
}
let event = event_recv.recv().await.expect("event_recv got error");
Self::handle_event(&event, &data).await;
}
}
@@ -303,6 +271,7 @@ impl ManualConnectorManager {
async fn collect_dead_conns(data: Arc<ConnectorManagerData>) -> BTreeSet<String> {
Self::handle_remove_connector(data.clone());
let all_urls: BTreeSet<String> = data
.connectors
.iter()

View File

@@ -13,7 +13,7 @@ use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, network::IPCollector},
tunnel::{
check_scheme_and_get_socket_addr, ring::RingTunnelConnector, tcp::TcpTunnelConnector,
udp::UdpTunnelConnector, IpVersion, TunnelConnector,
udp::UdpTunnelConnector, TunnelConnector,
},
};
@@ -43,8 +43,8 @@ async fn set_bind_addr_for_peer_connector(
connector.set_bind_addrs(bind_addrs);
} else {
let mut bind_addrs = vec![];
for ipv6 in ips.interface_ipv6s.iter().chain(ips.public_ipv6.iter()) {
let socket_addr = SocketAddrV6::new(std::net::Ipv6Addr::from(*ipv6), 0, 0, 0).into();
for ipv6 in ips.interface_ipv6s {
let socket_addr = SocketAddrV6::new(ipv6.into(), 0, 0, 0).into();
bind_addrs.push(socket_addr);
}
connector.set_bind_addrs(bind_addrs);
@@ -55,13 +55,11 @@ async fn set_bind_addr_for_peer_connector(
pub async fn create_connector_by_url(
url: &str,
global_ctx: &ArcGlobalCtx,
ip_version: IpVersion,
) -> Result<Box<dyn TunnelConnector + 'static>, Error> {
let url = url::Url::parse(url).map_err(|_| Error::InvalidUrl(url.to_owned()))?;
let mut connector: Box<dyn TunnelConnector + 'static> = match url.scheme() {
match url.scheme() {
"tcp" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp", ip_version).await?;
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp")?;
let mut connector = TcpTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
@@ -71,11 +69,10 @@ pub async fn create_connector_by_url(
)
.await;
}
Box::new(connector)
return Ok(Box::new(connector));
}
"udp" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp", ip_version).await?;
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp")?;
let mut connector = UdpTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
@@ -85,21 +82,20 @@ pub async fn create_connector_by_url(
)
.await;
}
Box::new(connector)
return Ok(Box::new(connector));
}
"http" | "https" => {
let connector = HttpTunnelConnector::new(url, global_ctx.clone());
Box::new(connector)
return Ok(Box::new(connector));
}
"ring" => {
check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring", IpVersion::Both).await?;
check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring")?;
let connector = RingTunnelConnector::new(url);
Box::new(connector)
return Ok(Box::new(connector));
}
#[cfg(feature = "quic")]
"quic" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic", ip_version).await?;
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic")?;
let mut connector = QUICTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
@@ -109,12 +105,11 @@ pub async fn create_connector_by_url(
)
.await;
}
Box::new(connector)
return Ok(Box::new(connector));
}
#[cfg(feature = "wireguard")]
"wg" => {
let dst_addr =
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg", ip_version).await?;
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg")?;
let nid = global_ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
@@ -129,12 +124,12 @@ pub async fn create_connector_by_url(
)
.await;
}
Box::new(connector)
return Ok(Box::new(connector));
}
#[cfg(feature = "websocket")]
"ws" | "wss" => {
use crate::tunnel::FromUrl;
let dst_addr = SocketAddr::from_url(url.clone(), ip_version).await?;
use crate::tunnel::{FromUrl, IpVersion};
let dst_addr = SocketAddr::from_url(url.clone(), IpVersion::Both)?;
let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url);
if global_ctx.config.get_flags().bind_device {
set_bind_addr_for_peer_connector(
@@ -144,17 +139,14 @@ pub async fn create_connector_by_url(
)
.await;
}
Box::new(connector)
return Ok(Box::new(connector));
}
"txt" | "srv" => {
let connector = dns_connector::DNSTunnelConnector::new(url, global_ctx.clone());
Box::new(connector)
return Ok(Box::new(connector));
}
_ => {
return Err(Error::InvalidUrl(url.into()));
}
};
connector.set_ip_version(ip_version);
Ok(connector)
}
}

View File

@@ -56,8 +56,8 @@ impl From<NatType> for UdpNatType {
fn from(nat_type: NatType) -> Self {
match nat_type {
NatType::Unknown => UdpNatType::Unknown,
NatType::OpenInternet => UdpNatType::Open(nat_type),
NatType::NoPat | NatType::FullCone | NatType::Restricted | NatType::PortRestricted => {
NatType::NoPat | NatType::OpenInternet => UdpNatType::Open(nat_type),
NatType::FullCone | NatType::Restricted | NatType::PortRestricted => {
UdpNatType::Cone(nat_type)
}
NatType::Symmetric | NatType::SymUdpFirewall => UdpNatType::HardSymmetric(nat_type),
@@ -495,7 +495,6 @@ impl PunchHoleServerCommon {
.udp_nat_type
}
#[async_recursion::async_recursion]
pub(crate) async fn select_listener(
&self,
use_new_listener: bool,
@@ -516,28 +515,24 @@ impl PunchHoleServerCommon {
let mut locked = all_listener_sockets.lock().await;
let listener = if use_last {
Some(locked.last_mut()?)
locked.last_mut()?
} else {
// use the listener that is active most recently
locked
.iter_mut()
.filter(|l| !l.mapped_addr.ip().is_unspecified())
.max_by_key(|listener| listener.last_active_time.load())
.max_by_key(|listener| listener.last_active_time.load())?
};
if listener.is_none() || listener.as_ref().unwrap().mapped_addr.ip().is_unspecified() {
tracing::warn!(
?use_new_listener,
"no available udp hole punching listener with mapped address"
);
if !use_new_listener {
return self.select_listener(true).await;
} else {
return None;
}
if listener.mapped_addr.ip().is_unspecified() {
tracing::info!("listener mapped addr is unspecified, trying to get mapped addr");
listener.mapped_addr = self
.get_global_ctx()
.get_stun_info_collector()
.get_udp_port_mapping(listener.mapped_addr.port())
.await
.ok()?;
}
let listener = listener.unwrap();
Some((listener.get_socket().await, listener.mapped_addr))
}

View File

@@ -143,7 +143,7 @@ impl UdpHolePunchRpc for UdpHolePunchServer {
}
#[derive(Debug)]
pub struct BackOff {
struct BackOff {
backoffs_ms: Vec<u64>,
current_idx: usize,
}

View File

@@ -434,7 +434,7 @@ impl PunchSymToConeHoleClient {
let public_ips: Vec<Ipv4Addr> = stun_info
.public_ip
.iter()
.filter_map(|x| x.parse().ok())
.map(|x| x.parse().unwrap())
.collect();
if public_ips.is_empty() {
return Err(anyhow::anyhow!("failed to get public ips"));

View File

@@ -3,14 +3,12 @@ use std::{
fmt::Write,
net::{IpAddr, SocketAddr},
path::PathBuf,
str::FromStr,
sync::Mutex,
time::Duration,
vec,
};
use anyhow::Context;
use cidr::Ipv4Inet;
use clap::{command, Args, Parser, Subcommand};
use humansize::format_size;
use service_manager::*;
@@ -53,15 +51,6 @@ struct Cli {
#[arg(short, long, default_value = "false", help = "verbose output")]
verbose: bool,
#[arg(
short = 'o',
long = "output",
value_enum,
default_value = "table",
help = "output format"
)]
output_format: OutputFormat,
#[command(subcommand)]
sub_command: SubCommand,
}
@@ -88,23 +77,23 @@ enum SubCommand {
Proxy,
}
#[derive(clap::ValueEnum, Debug, Clone, PartialEq)]
enum OutputFormat {
Table,
Json,
}
#[derive(Args, Debug)]
struct PeerArgs {
#[command(subcommand)]
sub_command: Option<PeerSubCommand>,
}
#[derive(Args, Debug)]
struct PeerListArgs {
#[arg(short, long)]
verbose: bool,
}
#[derive(Subcommand, Debug)]
enum PeerSubCommand {
Add,
Remove,
List,
List(PeerListArgs),
ListForeign,
ListGlobalForeign,
}
@@ -204,15 +193,14 @@ struct InstallArgs {
type Error = anyhow::Error;
struct CommandHandler<'a> {
struct CommandHandler {
client: Mutex<RpcClient>,
verbose: bool,
output_format: &'a OutputFormat,
}
type RpcClient = StandAloneClient<TcpTunnelConnector>;
impl CommandHandler<'_> {
impl CommandHandler {
async fn get_peer_manager_client(
&self,
) -> Result<Box<dyn PeerManageRpc<Controller = BaseController>>, Error> {
@@ -306,12 +294,9 @@ impl CommandHandler<'_> {
println!("remove peer");
}
async fn handle_peer_list(&self) -> Result<(), Error> {
#[derive(tabled::Tabled, serde::Serialize)]
async fn handle_peer_list(&self, _args: &PeerArgs) -> Result<(), Error> {
#[derive(tabled::Tabled)]
struct PeerTableItem {
#[tabled(rename = "ipv4")]
cidr: String,
#[tabled(skip)]
ipv4: String,
hostname: String,
cost: String,
@@ -329,12 +314,7 @@ impl CommandHandler<'_> {
fn from(p: PeerRoutePair) -> Self {
let route = p.route.clone().unwrap_or_default();
PeerTableItem {
cidr: route.ipv4_addr.map(|ip| ip.to_string()).unwrap_or_default(),
ipv4: route
.ipv4_addr
.map(|ip: easytier::proto::common::Ipv4Inet| ip.address.unwrap_or_default())
.map(|ip| ip.to_string())
.unwrap_or_default(),
ipv4: route.ipv4_addr.map(|ip| ip.to_string()).unwrap_or_default(),
hostname: route.hostname.clone(),
cost: cost_to_str(route.cost),
lat_ms: if route.cost == 1 {
@@ -364,10 +344,7 @@ impl CommandHandler<'_> {
impl From<NodeInfo> for PeerTableItem {
fn from(p: NodeInfo) -> Self {
PeerTableItem {
cidr: p.ipv4_addr.clone(),
ipv4: Ipv4Inet::from_str(&p.ipv4_addr)
.map(|ip| ip.address().to_string())
.unwrap_or_default(),
ipv4: p.ipv4_addr.clone(),
hostname: p.hostname.clone(),
cost: "Local".to_string(),
lat_ms: "-".to_string(),
@@ -389,7 +366,7 @@ impl CommandHandler<'_> {
let mut items: Vec<PeerTableItem> = vec![];
let peer_routes = self.list_peer_route_pair().await?;
if self.verbose {
println!("{}", serde_json::to_string_pretty(&peer_routes)?);
println!("{:#?}", peer_routes);
return Ok(());
}
@@ -405,7 +382,7 @@ impl CommandHandler<'_> {
items.push(p.into());
}
print_output(&items, self.output_format)?;
println!("{}", tabled::Table::new(items).with(Style::modern()));
Ok(())
}
@@ -427,9 +404,8 @@ impl CommandHandler<'_> {
.list_foreign_network(BaseController::default(), request)
.await?;
let network_map = response;
if self.verbose || *self.output_format == OutputFormat::Json {
let json = serde_json::to_string_pretty(&network_map.foreign_networks)?;
println!("{}", json);
if self.verbose {
println!("{:#?}", network_map);
return Ok(());
}
@@ -469,11 +445,8 @@ impl CommandHandler<'_> {
let response = client
.list_global_foreign_network(BaseController::default(), request)
.await?;
if self.verbose || *self.output_format == OutputFormat::Json {
println!(
"{}",
serde_json::to_string_pretty(&response.foreign_networks)?
);
if self.verbose {
println!("{:#?}", response);
return Ok(());
}
@@ -491,7 +464,7 @@ impl CommandHandler<'_> {
}
async fn handle_route_list(&self) -> Result<(), Error> {
#[derive(tabled::Tabled, serde::Serialize)]
#[derive(tabled::Tabled)]
struct RouteTableItem {
ipv4: String,
hostname: String,
@@ -518,23 +491,6 @@ impl CommandHandler<'_> {
.await?
.node_info
.ok_or(anyhow::anyhow!("node info not found"))?;
let peer_routes = self.list_peer_route_pair().await?;
if self.verbose {
#[derive(serde::Serialize)]
struct VerboseItem {
node_info: NodeInfo,
peer_routes: Vec<PeerRoutePair>,
}
println!(
"{}",
serde_json::to_string_pretty(&VerboseItem {
node_info,
peer_routes
})?
);
return Ok(());
}
items.push(RouteTableItem {
ipv4: node_info.ipv4_addr.clone(),
@@ -554,6 +510,7 @@ impl CommandHandler<'_> {
version: node_info.version.clone(),
});
let peer_routes = self.list_peer_route_pair().await?;
for p in peer_routes.iter() {
let Some(next_hop_pair) = peer_routes.iter().find(|pair| {
pair.route.clone().unwrap_or_default().peer_id
@@ -677,7 +634,7 @@ impl CommandHandler<'_> {
}
}
print_output(&items, self.output_format)?;
println!("{}", tabled::Table::new(items).with(Style::modern()));
Ok(())
}
@@ -688,10 +645,6 @@ impl CommandHandler<'_> {
let response = client
.list_connector(BaseController::default(), request)
.await?;
if self.verbose || *self.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&response.connectors)?);
return Ok(());
}
println!("response: {:#?}", response);
Ok(())
}
@@ -959,21 +912,6 @@ impl Service {
}
}
fn print_output<T>(items: &[T], format: &OutputFormat) -> Result<(), Error>
where
T: tabled::Tabled + serde::Serialize,
{
match format {
OutputFormat::Table => {
println!("{}", tabled::Table::new(items).with(Style::modern()));
}
OutputFormat::Json => {
println!("{}", serde_json::to_string_pretty(items)?);
}
}
Ok(())
}
#[tokio::main]
#[tracing::instrument]
async fn main() -> Result<(), Error> {
@@ -986,7 +924,6 @@ async fn main() -> Result<(), Error> {
let handler = CommandHandler {
client: Mutex::new(client),
verbose: cli.verbose,
output_format: &cli.output_format,
};
match cli.sub_command {
@@ -997,8 +934,12 @@ async fn main() -> Result<(), Error> {
Some(PeerSubCommand::Remove) => {
println!("remove peer");
}
Some(PeerSubCommand::List) => {
handler.handle_peer_list().await?;
Some(PeerSubCommand::List(arg)) => {
if arg.verbose {
println!("{:#?}", handler.list_peer_route_pair().await?);
} else {
handler.handle_peer_list(&peer_args).await?;
}
}
Some(PeerSubCommand::ListForeign) => {
handler.handle_foreign_network_list().await?;
@@ -1007,7 +948,7 @@ async fn main() -> Result<(), Error> {
handler.handle_global_foreign_network_list().await?;
}
None => {
handler.handle_peer_list().await?;
handler.handle_peer_list(&peer_args).await?;
}
},
SubCommand::Connector(conn_args) => match conn_args.sub_command {
@@ -1034,14 +975,7 @@ async fn main() -> Result<(), Error> {
loop {
let ret = collector.get_stun_info();
if ret.udp_nat_type != NatType::Unknown as i32 {
if cli.output_format == OutputFormat::Json {
match serde_json::to_string_pretty(&ret) {
Ok(json) => println!("{}", json),
Err(e) => eprintln!("Error serializing to JSON: {}", e),
}
} else {
println!("stun info: {:#?}", ret);
}
println!("stun info: {:#?}", ret);
break;
}
tokio::time::sleep(Duration::from_millis(200)).await;
@@ -1059,45 +993,27 @@ async fn main() -> Result<(), Error> {
)
.await?;
#[derive(tabled::Tabled, serde::Serialize)]
#[derive(tabled::Tabled)]
struct PeerCenterTableItem {
node_id: String,
#[tabled(rename = "direct_peers")]
#[serde(skip_serializing)]
direct_peers_str: String,
#[tabled(skip)]
direct_peers: Vec<DirectPeerItem>,
}
#[derive(serde::Serialize)]
struct DirectPeerItem {
node_id: String,
latency_ms: i32,
direct_peers: String,
}
let mut table_rows = vec![];
for (k, v) in resp.global_peer_map.iter() {
let node_id = k;
let direct_peers_strs = v
let direct_peers = v
.direct_peers
.iter()
.map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,))
.collect::<Vec<_>>();
let direct_peers: Vec<_> = v.direct_peers
.iter()
.map(|(k, v)| DirectPeerItem {
node_id: k.to_string(),
latency_ms: v.latency_ms,
})
.collect();
table_rows.push(PeerCenterTableItem {
node_id: node_id.to_string(),
direct_peers_str: direct_peers_strs.join("\n"),
direct_peers,
direct_peers: direct_peers.join("\n"),
});
}
print_output(&table_rows, &cli.output_format)?;
println!("{}", tabled::Table::new(table_rows).with(Style::modern()));
}
SubCommand::VpnPortal => {
let vpn_portal_client = handler.get_vpn_portal_client().await?;
@@ -1129,11 +1045,6 @@ async fn main() -> Result<(), Error> {
.ok_or(anyhow::anyhow!("node info not found"))?;
match sub_cmd.sub_command {
Some(NodeSubCommand::Info) | None => {
if cli.verbose || cli.output_format == OutputFormat::Json {
println!("{}", serde_json::to_string_pretty(&node_info)?);
return Ok(());
}
let stun_info = node_info.stun_info.clone().unwrap_or_default();
let ip_list = node_info.ip_list.clone().unwrap_or_default();
@@ -1275,12 +1186,7 @@ async fn main() -> Result<(), Error> {
.await;
entries.extend(ret.unwrap_or_default().entries);
if cli.verbose {
println!("{}", serde_json::to_string_pretty(&entries)?);
return Ok(());
}
#[derive(tabled::Tabled, serde::Serialize)]
#[derive(tabled::Tabled)]
struct TableItem {
src: String,
dst: String,
@@ -1309,7 +1215,7 @@ async fn main() -> Result<(), Error> {
})
.collect::<Vec<_>>();
print_output(&table_rows, &cli.output_format)?;
println!("{}", tabled::Table::new(table_rows).with(Style::modern()));
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -45,13 +45,11 @@ impl IpPacket {
// make sure the fragment doesn't overlap with existing fragments
for f in &self.fragments {
if f.offset <= fragment.offset && fragment.offset < f.offset + f.data.len() as u16 {
tracing::trace!("fragment overlap 1, f.offset = {}, fragment.offset = {}, f.data.len() = {}, fragment.data.len() = {}", f.offset, fragment.offset, f.data.len(), fragment.data.len());
return;
}
if fragment.offset <= f.offset
&& f.offset < fragment.offset + fragment.data.len() as u16
{
tracing::trace!("fragment overlap 2, f.offset = {}, fragment.offset = {}, f.data.len() = {}, fragment.data.len() = {}", f.offset, fragment.offset, f.data.len(), fragment.data.len());
return;
}
}
@@ -153,13 +151,6 @@ impl IpReassembler {
id,
};
tracing::trace!(
?key,
"add fragment, offset = {}, total_length = {}",
fragment.offset,
total_length
);
let mut entry = self.packets.entry(key.clone()).or_insert_with(|| {
let packet = IpPacket::new(source, destination);
let timestamp = Instant::now();

View File

@@ -106,8 +106,8 @@ async fn handle_kcp_output(
#[derive(Debug, Clone)]
pub struct NatDstKcpConnector {
pub(crate) kcp_endpoint: Arc<KcpEndpoint>,
pub(crate) peer_mgr: Arc<PeerManager>,
kcp_endpoint: Arc<KcpEndpoint>,
peer_mgr: Arc<PeerManager>,
}
#[async_trait::async_trait]
@@ -299,10 +299,6 @@ impl KcpProxySrc {
pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstKcpConnector>> {
self.tcp_proxy.0.clone()
}
pub fn get_kcp_endpoint(&self) -> Arc<KcpEndpoint> {
self.kcp_endpoint.clone()
}
}
pub struct KcpProxyDst {

View File

@@ -1,17 +1,10 @@
use std::{
net::{IpAddr, Ipv4Addr, SocketAddr},
sync::{Arc, Weak},
time::{Duration, Instant},
sync::Arc,
time::Duration,
};
use crossbeam::atomic::AtomicCell;
use kcp_sys::{endpoint::KcpEndpoint, stream::KcpStream};
use crate::{
common::{
config::PortForwardConfig, global_ctx::GlobalCtxEvent, join_joinset_background,
scoped_task::ScopedTask,
},
gateway::{
fast_socks5::{
server::{
@@ -19,22 +12,19 @@ use crate::{
},
util::stream::tcp_connect_with_timeout,
},
ip_reassembler::IpReassembler,
kcp_proxy::NatDstKcpConnector,
tokio_smoltcp::{channel_device, Net, NetConfig},
tokio_smoltcp::TcpStream,
},
tunnel::packet_def::{PacketType, ZCPacket},
tunnel::packet_def::PacketType,
};
use anyhow::Context;
use dashmap::DashMap;
use pnet::packet::{
ip::IpNextHeaderProtocols, ipv4::Ipv4Packet, tcp::TcpPacket, udp::UdpPacket, Packet,
};
use dashmap::DashSet;
use pnet::packet::{ip::IpNextHeaderProtocols, ipv4::Ipv4Packet, tcp::TcpPacket, Packet};
use tokio::{
io::{AsyncRead, AsyncWrite},
net::TcpListener,
net::UdpSocket,
select,
};
use tokio::{
net::TcpListener,
sync::{mpsc, Mutex},
task::JoinSet,
time::timeout,
@@ -42,36 +32,14 @@ use tokio::{
use crate::{
common::{error::Error, global_ctx::GlobalCtx},
gateway::tokio_smoltcp::{channel_device, Net, NetConfig},
peers::{peer_manager::PeerManager, PeerPacketFilter},
tunnel::packet_def::ZCPacket,
};
use super::tcp_proxy::NatDstConnector as _;
enum SocksUdpSocket {
UdpSocket(Arc<tokio::net::UdpSocket>),
SmolUdpSocket(super::tokio_smoltcp::UdpSocket),
}
impl SocksUdpSocket {
pub async fn send_to(&self, buf: &[u8], addr: SocketAddr) -> Result<usize, std::io::Error> {
match self {
SocksUdpSocket::UdpSocket(socket) => socket.send_to(buf, addr).await,
SocksUdpSocket::SmolUdpSocket(socket) => socket.send_to(buf, addr).await,
}
}
pub async fn recv_from(&self, buf: &mut [u8]) -> Result<(usize, SocketAddr), std::io::Error> {
match self {
SocksUdpSocket::UdpSocket(socket) => socket.recv_from(buf).await,
SocksUdpSocket::SmolUdpSocket(socket) => socket.recv_from(buf).await,
}
}
}
enum SocksTcpStream {
TcpStream(tokio::net::TcpStream),
SmolTcpStream(super::tokio_smoltcp::TcpStream),
KcpStream(KcpStream),
SmolTcpStream(TcpStream),
}
impl AsyncRead for SocksTcpStream {
@@ -87,9 +55,6 @@ impl AsyncRead for SocksTcpStream {
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_read(cx, buf)
}
SocksTcpStream::KcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_read(cx, buf)
}
}
}
}
@@ -107,9 +72,6 @@ impl AsyncWrite for SocksTcpStream {
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_write(cx, buf)
}
SocksTcpStream::KcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_write(cx, buf)
}
}
}
@@ -122,7 +84,6 @@ impl AsyncWrite for SocksTcpStream {
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_flush(cx)
}
SocksTcpStream::KcpStream(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
}
}
@@ -137,121 +98,17 @@ impl AsyncWrite for SocksTcpStream {
SocksTcpStream::SmolTcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_shutdown(cx)
}
SocksTcpStream::KcpStream(ref mut stream) => {
std::pin::Pin::new(stream).poll_shutdown(cx)
}
}
}
}
enum Socks5EntryData {
Tcp(TcpListener), // hold a binded socket to hold the tcp port
Udp((Arc<SocksUdpSocket>, UdpClientKey)), // hold the socket to send data to dst
}
const UDP_ENTRY: u8 = 1;
const TCP_ENTRY: u8 = 2;
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
struct Socks5Entry {
src: SocketAddr,
dst: SocketAddr,
entry_type: u8,
}
type Socks5EntrySet = Arc<DashMap<Socks5Entry, Socks5EntryData>>;
struct SmolTcpConnector {
net: Arc<Net>,
entries: Socks5EntrySet,
current_entry: std::sync::Mutex<Option<Socks5Entry>>,
}
#[async_trait::async_trait]
impl AsyncTcpConnector for SmolTcpConnector {
type S = SocksTcpStream;
async fn tcp_connect(
&self,
addr: SocketAddr,
timeout_s: u64,
) -> crate::gateway::fast_socks5::Result<SocksTcpStream> {
let tmp_listener = TcpListener::bind("0.0.0.0:0").await?;
let local_addr = self.net.get_address();
let port = tmp_listener.local_addr()?.port();
let entry = Socks5Entry {
src: SocketAddr::new(local_addr, port),
dst: addr,
entry_type: TCP_ENTRY,
};
*self.current_entry.lock().unwrap() = Some(entry.clone());
self.entries
.insert(entry, Socks5EntryData::Tcp(tmp_listener));
if addr.ip() == local_addr {
let modified_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), addr.port());
Ok(SocksTcpStream::TcpStream(
tcp_connect_with_timeout(modified_addr, timeout_s).await?,
))
} else {
let remote_socket = timeout(
Duration::from_secs(timeout_s),
self.net.tcp_connect(addr, port),
)
.await
.with_context(|| "connect to remote timeout")?;
Ok(SocksTcpStream::SmolTcpStream(remote_socket.map_err(
|e| super::fast_socks5::SocksError::Other(e.into()),
)?))
}
}
}
impl Drop for SmolTcpConnector {
fn drop(&mut self) {
if let Some(entry) = self.current_entry.lock().unwrap().take() {
self.entries.remove(&entry);
}
}
}
struct Socks5KcpConnector {
kcp_endpoint: Weak<KcpEndpoint>,
peer_mgr: Weak<PeerManager>,
src_addr: SocketAddr,
}
#[async_trait::async_trait]
impl AsyncTcpConnector for Socks5KcpConnector {
type S = SocksTcpStream;
async fn tcp_connect(
&self,
addr: SocketAddr,
_timeout_s: u64,
) -> crate::gateway::fast_socks5::Result<SocksTcpStream> {
let Some(kcp_endpoint) = self.kcp_endpoint.upgrade() else {
return Err(anyhow::anyhow!("kcp endpoint is not ready").into());
};
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer mgr is not ready").into());
};
let c = NatDstKcpConnector {
kcp_endpoint,
peer_mgr,
};
println!("connect to kcp endpoint, addr = {:?}", addr);
let ret = c
.connect(self.src_addr, addr)
.await
.map_err(|e| super::fast_socks5::SocksError::Other(e.into()))?;
Ok(SocksTcpStream::KcpStream(ret))
}
}
type Socks5EntrySet = Arc<DashSet<Socks5Entry>>;
struct Socks5ServerNet {
ipv4_addr: cidr::Ipv4Inet,
@@ -273,7 +130,7 @@ impl Socks5ServerNet {
) -> Self {
let mut forward_tasks = JoinSet::new();
let mut cap = smoltcp::phy::DeviceCapabilities::default();
cap.max_transmission_unit = 1284; // 1284 - 20 can be divided by 8 (fragment offset unit)
cap.max_transmission_unit = 1280;
cap.medium = smoltcp::phy::Medium::Ip;
let (dev, stack_sink, mut stack_stream) = channel_device::ChannelDevice::new(cap);
@@ -294,8 +151,7 @@ impl Socks5ServerNet {
while let Some(data) = stack_stream.recv().await {
tracing::trace!(
?data,
"receive from smoltcp stack and send to peer mgr packet, len = {}",
data.len()
"receive from smoltcp stack and send to peer mgr packet"
);
let Some(ipv4) = Ipv4Packet::new(&data) else {
tracing::error!(?data, "smoltcp stack stream get non ipv4 packet");
@@ -341,14 +197,69 @@ impl Socks5ServerNet {
config.set_skip_auth(false);
config.set_allow_no_auth(true);
struct SmolTcpConnector(
Arc<Net>,
Socks5EntrySet,
std::sync::Mutex<Option<Socks5Entry>>,
);
#[async_trait::async_trait]
impl AsyncTcpConnector for SmolTcpConnector {
type S = SocksTcpStream;
async fn tcp_connect(
&self,
addr: SocketAddr,
timeout_s: u64,
) -> crate::gateway::fast_socks5::Result<SocksTcpStream> {
let local_addr = self.0.get_address();
let port = self.0.get_port();
let entry = Socks5Entry {
src: SocketAddr::new(local_addr, port),
dst: addr,
};
*self.2.lock().unwrap() = Some(entry.clone());
self.1.insert(entry);
if addr.ip() == local_addr {
let modified_addr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), addr.port());
Ok(SocksTcpStream::TcpStream(
tcp_connect_with_timeout(modified_addr, timeout_s).await?,
))
} else {
let remote_socket = timeout(
Duration::from_secs(timeout_s),
self.0.tcp_connect(addr, port),
)
.await
.with_context(|| "connect to remote timeout")?;
Ok(SocksTcpStream::SmolTcpStream(remote_socket.map_err(
|e| super::fast_socks5::SocksError::Other(e.into()),
)?))
}
}
}
impl Drop for SmolTcpConnector {
fn drop(&mut self) {
if let Some(entry) = self.2.lock().unwrap().take() {
self.1.remove(&entry);
}
}
}
let socket = Socks5Socket::new(
stream,
Arc::new(config),
SmolTcpConnector {
net: self.smoltcp_net.clone(),
entries: self.entries.clone(),
current_entry: std::sync::Mutex::new(None),
},
SmolTcpConnector(
self.smoltcp_net.clone(),
self.entries.clone(),
std::sync::Mutex::new(None),
),
);
self.forward_tasks.lock().unwrap().spawn(async move {
@@ -364,38 +275,17 @@ impl Socks5ServerNet {
}
}
struct UdpClientInfo {
client_addr: SocketAddr,
port_holder_socket: Arc<UdpSocket>,
local_addr: SocketAddr,
last_active: AtomicCell<Instant>,
entries: Socks5EntrySet,
entry_key: Socks5Entry,
}
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
struct UdpClientKey {
client_addr: SocketAddr,
dst_addr: SocketAddr,
}
pub struct Socks5Server {
global_ctx: Arc<GlobalCtx>,
peer_manager: Arc<PeerManager>,
auth: Option<SimpleUserPassword>,
tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
tasks: Arc<Mutex<JoinSet<()>>>,
packet_sender: mpsc::Sender<ZCPacket>,
packet_recv: Arc<Mutex<mpsc::Receiver<ZCPacket>>>,
net: Arc<Mutex<Option<Socks5ServerNet>>>,
entries: Socks5EntrySet,
tcp_forward_task: Arc<std::sync::Mutex<JoinSet<()>>>,
udp_client_map: Arc<DashMap<UdpClientKey, Arc<UdpClientInfo>>>,
udp_forward_task: Arc<DashMap<UdpClientKey, ScopedTask<()>>>,
kcp_endpoint: Mutex<Option<Weak<KcpEndpoint>>>,
}
#[async_trait::async_trait]
@@ -409,65 +299,22 @@ impl PeerPacketFilter for Socks5Server {
let payload_bytes = packet.payload();
let ipv4 = Ipv4Packet::new(payload_bytes).unwrap();
if ipv4.get_version() != 4 {
if ipv4.get_version() != 4 || ipv4.get_next_level_protocol() != IpNextHeaderProtocols::Tcp {
return Some(packet);
}
let entry_key = match ipv4.get_next_level_protocol() {
IpNextHeaderProtocols::Tcp => {
let tcp_packet = TcpPacket::new(ipv4.payload()).unwrap();
Socks5Entry {
dst: SocketAddr::new(ipv4.get_source().into(), tcp_packet.get_source()),
src: SocketAddr::new(
ipv4.get_destination().into(),
tcp_packet.get_destination(),
),
entry_type: TCP_ENTRY,
}
}
IpNextHeaderProtocols::Udp => {
if IpReassembler::is_packet_fragmented(&ipv4) && !self.entries.is_empty() {
let ipv4_src: IpAddr = ipv4.get_source().into();
// only send to smoltcp if the ipv4 src is in the entries
let is_in_entries = self.entries.iter().any(|x| x.key().dst.ip() == ipv4_src);
tracing::trace!(
?is_in_entries,
"ipv4 src = {:?}, check need send both smoltcp and kernel tun",
ipv4_src
);
if is_in_entries {
// if the packet is fragmented, no matther what the payload is, need send it to both smoltcp and kernel tun. because
// we cannot determine the udp port of the packet.
let _ = self.packet_sender.try_send(packet.clone()).ok();
}
return Some(packet);
}
let udp_packet = UdpPacket::new(ipv4.payload()).unwrap();
Socks5Entry {
dst: SocketAddr::new(ipv4.get_source().into(), udp_packet.get_source()),
src: SocketAddr::new(
ipv4.get_destination().into(),
udp_packet.get_destination(),
),
entry_type: UDP_ENTRY,
}
}
_ => {
return Some(packet);
}
let tcp_packet = TcpPacket::new(ipv4.payload()).unwrap();
let entry = Socks5Entry {
dst: SocketAddr::new(ipv4.get_source().into(), tcp_packet.get_source()),
src: SocketAddr::new(ipv4.get_destination().into(), tcp_packet.get_destination()),
};
if !self.entries.contains_key(&entry_key) {
if !self.entries.contains(&entry) {
return Some(packet);
}
tracing::trace!(?entry_key, ?ipv4, "socks5 found entry for packet from peer");
let _ = self.packet_sender.try_send(packet).ok();
None
return None;
}
}
@@ -483,18 +330,12 @@ impl Socks5Server {
peer_manager,
auth,
tasks: Arc::new(std::sync::Mutex::new(JoinSet::new())),
tasks: Arc::new(Mutex::new(JoinSet::new())),
packet_recv: Arc::new(Mutex::new(packet_recv)),
packet_sender,
net: Arc::new(Mutex::new(None)),
entries: Arc::new(DashMap::new()),
tcp_forward_task: Arc::new(std::sync::Mutex::new(JoinSet::new())),
udp_client_map: Arc::new(DashMap::new()),
udp_forward_task: Arc::new(DashMap::new()),
kcp_endpoint: Mutex::new(None),
entries: Arc::new(DashSet::new()),
})
}
@@ -504,9 +345,7 @@ impl Socks5Server {
let peer_manager = self.peer_manager.clone();
let packet_recv = self.packet_recv.clone();
let entries = self.entries.clone();
let tcp_forward_task = self.tcp_forward_task.clone();
let udp_client_map = self.udp_client_map.clone();
self.tasks.lock().unwrap().spawn(async move {
self.tasks.lock().await.spawn(async move {
let mut prev_ipv4 = None;
loop {
let mut event_recv = global_ctx.subscribe();
@@ -514,10 +353,7 @@ impl Socks5Server {
let cur_ipv4 = global_ctx.get_ipv4();
if prev_ipv4 != cur_ipv4 {
prev_ipv4 = cur_ipv4;
entries.clear();
tcp_forward_task.lock().unwrap().abort_all();
udp_client_map.clear();
if cur_ipv4.is_none() {
let _ = net.lock().await.take();
@@ -540,356 +376,43 @@ impl Socks5Server {
});
}
pub async fn run(
self: &Arc<Self>,
kcp_endpoint: Option<Weak<KcpEndpoint>>,
) -> Result<(), Error> {
*self.kcp_endpoint.lock().await = kcp_endpoint;
let mut need_start = false;
if let Some(proxy_url) = self.global_ctx.config.get_socks5_portal() {
let bind_addr = format!(
"{}:{}",
proxy_url.host_str().unwrap(),
proxy_url.port().unwrap()
);
let listener = {
let _g = self.global_ctx.net_ns.guard();
TcpListener::bind(bind_addr.parse::<SocketAddr>().unwrap()).await?
};
let net = self.net.clone();
self.tasks.lock().unwrap().spawn(async move {
loop {
match listener.accept().await {
Ok((socket, _addr)) => {
tracing::info!("accept a new connection, {:?}", socket);
if let Some(net) = net.lock().await.as_ref() {
net.handle_tcp_stream(socket);
}
}
Err(err) => tracing::error!("accept error = {:?}", err),
}
}
});
join_joinset_background(self.tasks.clone(), "socks5 server".to_string());
need_start = true;
pub async fn run(self: &Arc<Self>) -> Result<(), Error> {
let Some(proxy_url) = self.global_ctx.config.get_socks5_portal() else {
return Ok(());
};
for port_forward in self.global_ctx.config.get_port_forwards() {
self.add_port_forward(port_forward).await?;
need_start = true;
}
let bind_addr = format!(
"{}:{}",
proxy_url.host_str().unwrap(),
proxy_url.port().unwrap()
);
if need_start {
self.peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
self.run_net_update_task().await;
}
Ok(())
}
async fn handle_port_forward_connection(
mut incoming_socket: tokio::net::TcpStream,
connector: Box<dyn AsyncTcpConnector<S = SocksTcpStream> + Send>,
dst_addr: SocketAddr,
) {
let outgoing_socket = match connector.tcp_connect(dst_addr, 10).await {
Ok(socket) => socket,
Err(e) => {
tracing::error!("port forward: failed to connect to destination: {:?}", e);
return;
}
};
let mut outgoing_socket = outgoing_socket;
match tokio::io::copy_bidirectional(&mut incoming_socket, &mut outgoing_socket).await {
Ok((from_client, from_server)) => {
tracing::info!(
"port forward connection finished: client->server: {} bytes, server->client: {} bytes",
from_client, from_server
);
}
Err(e) => {
tracing::error!("port forward connection error: {:?}", e);
}
}
}
pub async fn add_port_forward(&self, cfg: PortForwardConfig) -> Result<(), Error> {
match cfg.proto.to_lowercase().as_str() {
"tcp" => {
self.add_tcp_port_forward(cfg.bind_addr, cfg.dst_addr)
.await?;
}
"udp" => {
self.add_udp_port_forward(cfg.bind_addr, cfg.dst_addr)
.await?;
}
_ => {
return Err(anyhow::anyhow!(
"unsupported protocol: {}, only support udp / tcp",
cfg.proto
)
.into());
}
}
self.global_ctx
.issue_event(GlobalCtxEvent::PortForwardAdded(cfg.clone().into()));
Ok(())
}
pub async fn add_tcp_port_forward(
&self,
bind_addr: SocketAddr,
dst_addr: SocketAddr,
) -> Result<(), Error> {
let listener = {
let _g = self.global_ctx.net_ns.guard();
TcpListener::bind(bind_addr).await?
TcpListener::bind(bind_addr.parse::<SocketAddr>().unwrap()).await?
};
self.peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
self.run_net_update_task().await;
let net = self.net.clone();
let entries = self.entries.clone();
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
let forward_tasks = tasks.clone();
let kcp_endpoint = self.kcp_endpoint.lock().await.clone();
let peer_mgr = Arc::downgrade(&self.peer_manager.clone());
self.tasks.lock().unwrap().spawn(async move {
self.tasks.lock().await.spawn(async move {
loop {
let (incoming_socket, addr) = match listener.accept().await {
Ok(result) => result,
Err(err) => {
tracing::error!("port forward accept error = {:?}", err);
continue;
}
};
tracing::info!(
"port forward: accept new connection from {:?} to {:?}",
bind_addr,
dst_addr
);
let net_guard = net.lock().await;
let Some(net) = net_guard.as_ref() else {
tracing::error!("net is not ready");
continue;
};
let connector: Box<dyn AsyncTcpConnector<S = SocksTcpStream> + Send> =
if kcp_endpoint.is_none() {
Box::new(SmolTcpConnector {
net: net.smoltcp_net.clone(),
entries: entries.clone(),
current_entry: std::sync::Mutex::new(None),
})
} else {
let kcp_endpoint = kcp_endpoint.as_ref().unwrap().clone();
Box::new(Socks5KcpConnector {
kcp_endpoint,
peer_mgr: peer_mgr.clone(),
src_addr: addr,
})
};
forward_tasks
.lock()
.unwrap()
.spawn(Self::handle_port_forward_connection(
incoming_socket,
connector,
dst_addr,
));
}
});
Ok(())
}
#[tracing::instrument(name = "add_udp_port_forward", skip(self))]
pub async fn add_udp_port_forward(
&self,
bind_addr: SocketAddr,
dst_addr: SocketAddr,
) -> Result<(), Error> {
let socket = {
let _g = self.global_ctx.net_ns.guard();
Arc::new(UdpSocket::bind(bind_addr).await?)
};
let entries = self.entries.clone();
let net_ns = self.global_ctx.net_ns.clone();
let net = self.net.clone();
let udp_client_map = self.udp_client_map.clone();
let udp_forward_task = self.udp_forward_task.clone();
self.tasks.lock().unwrap().spawn(async move {
loop {
// we set the max buffer size of smoltcp to 8192, so we need to use a buffer size that is less than 8192 here.
let mut buf = vec![0u8; 8192];
let (len, addr) = match socket.recv_from(&mut buf).await {
Ok(result) => result,
Err(err) => {
tracing::error!("udp port forward recv error = {:?}", err);
continue;
}
};
tracing::trace!(
"udp port forward recv packet from {:?}, len = {}",
addr,
len
);
let udp_client_key = UdpClientKey {
client_addr: addr,
dst_addr,
};
let binded_socket = udp_client_map.get(&udp_client_key);
let client_info = match binded_socket {
Some(s) => s.clone(),
None => {
let _g = net_ns.guard();
// reserve a port so os will not use it to connect to the virtual network
let binded_socket = tokio::net::UdpSocket::bind("0.0.0.0:0").await;
if binded_socket.is_err() {
tracing::error!("udp port forward bind error = {:?}", binded_socket);
continue;
match listener.accept().await {
Ok((socket, _addr)) => {
tracing::info!("accept a new connection, {:?}", socket);
if let Some(net) = net.lock().await.as_ref() {
net.handle_tcp_stream(socket);
}
let binded_socket = binded_socket.unwrap();
let mut local_addr = binded_socket.local_addr().unwrap();
let Some(cur_ipv4) = net.lock().await.as_ref().map(|net| net.ipv4_addr) else {
continue;
};
local_addr.set_ip(cur_ipv4.address().into());
let entry_key = Socks5Entry {
src: local_addr,
dst: dst_addr,
entry_type: UDP_ENTRY,
};
tracing::debug!("udp port forward binded socket = {:?}, entry_key = {:?}", local_addr, entry_key);
let client_info = Arc::new(UdpClientInfo {
client_addr: addr,
port_holder_socket: Arc::new(binded_socket),
local_addr,
last_active: AtomicCell::new(Instant::now()),
entries: entries.clone(),
entry_key,
});
udp_client_map.insert(udp_client_key.clone(), client_info.clone());
client_info
}
};
client_info.last_active.store(Instant::now());
let entry_data = match entries.get(&client_info.entry_key) {
Some(data) => data,
None => {
let guard = net.lock().await;
let Some(net) = guard.as_ref() else {
continue;
};
let local_addr = net.ipv4_addr;
let sokcs_udp = if dst_addr.ip() == local_addr.address() {
SocksUdpSocket::UdpSocket(client_info.port_holder_socket.clone())
} else {
tracing::debug!("udp port forward bind new smol udp socket, {:?}", local_addr);
SocksUdpSocket::SmolUdpSocket(
net.smoltcp_net
.udp_bind(SocketAddr::new(
IpAddr::V4(local_addr.address()),
client_info.local_addr.port(),
))
.await
.unwrap(),
)
};
let socks_udp = Arc::new(sokcs_udp);
entries.insert(
client_info.entry_key.clone(),
Socks5EntryData::Udp((socks_udp.clone(), udp_client_key.clone())),
);
let socks = socket.clone();
let client_addr = addr;
udp_forward_task.insert(
udp_client_key.clone(),
ScopedTask::from(tokio::spawn(async move {
loop {
let mut buf = vec![0u8; 8192];
match socks_udp.recv_from(&mut buf).await {
Ok((len, dst_addr)) => {
tracing::trace!(
"udp port forward recv response packet from {:?}, len = {}, client_addr = {:?}",
dst_addr,
len,
client_addr
);
if let Err(e) = socks.send_to(&buf[..len], client_addr).await {
tracing::error!("udp forward send error = {:?}", e);
}
}
Err(e) => {
tracing::error!("udp forward recv error = {:?}", e);
}
}
}
})),
);
entries.get(&client_info.entry_key).unwrap()
}
};
let s = match entry_data.value() {
Socks5EntryData::Udp((s, _)) => s.clone(),
_ => {
panic!("udp entry data is not udp entry data");
}
};
drop(entry_data);
if let Err(e) = s.send_to(&buf[..len], dst_addr).await {
tracing::error!(?dst_addr, ?len, "udp port forward send error = {:?}", e);
} else {
tracing::trace!(?dst_addr, ?len, "udp port forward send packet success");
Err(err) => tracing::error!("accept error = {:?}", err),
}
}
});
// clean up task
let udp_client_map = self.udp_client_map.clone();
let udp_forward_task = self.udp_forward_task.clone();
let entries = self.entries.clone();
self.tasks.lock().unwrap().spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(30)).await;
let now = Instant::now();
udp_client_map.retain(|_, client_info| {
now.duration_since(client_info.last_active.load()).as_secs() < 600
});
udp_forward_task.retain(|k, _| udp_client_map.contains_key(&k));
entries.retain(|_, data| match data {
Socks5EntryData::Udp((_, udp_client_key)) => {
udp_client_map.contains_key(&udp_client_key)
}
_ => true,
});
}
});
Ok(())
}
}

View File

@@ -351,10 +351,9 @@ impl<C: NatDstConnector> PeerPacketFilter for TcpProxy<C> {
#[async_trait::async_trait]
impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
let Some(my_ipv4_inet) = self.get_local_inet() else {
let Some(my_ipv4) = self.get_local_ip() else {
return false;
};
let my_ipv4 = my_ipv4_inet.address();
let data = zc_packet.payload();
let ip_packet = Ipv4Packet::new(data).unwrap();
@@ -378,7 +377,7 @@ impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
// for kcp proxy, the src ip of nat entry will be converted from my ip to fake ip
// here we need to convert it back
if !self.is_smoltcp_enabled() && dst_addr.ip() == Self::get_fake_local_ipv4(&my_ipv4_inet) {
if !self.is_smoltcp_enabled() && dst_addr.ip() == Self::get_fake_local_ipv4(my_ipv4) {
dst_addr.set_ip(IpAddr::V4(my_ipv4));
need_transform_dst = true;
}
@@ -621,15 +620,13 @@ impl<C: NatDstConnector> TcpProxy<C> {
continue;
};
let my_ip_inet = global_ctx.get_ipv4();
let my_ip = my_ip_inet
let my_ip = global_ctx
.get_ipv4()
.as_ref()
.map(Ipv4Inet::address)
.unwrap_or(Ipv4Addr::UNSPECIFIED);
if my_ip_inet.is_some()
&& socket_addr.ip() == Self::get_fake_local_ipv4(&my_ip_inet.unwrap())
{
if socket_addr.ip() == Self::get_fake_local_ipv4(my_ip) {
socket_addr.set_ip(IpAddr::V4(my_ip));
}
@@ -771,14 +768,13 @@ impl<C: NatDstConnector> TcpProxy<C> {
}
pub fn get_local_ip(&self) -> Option<Ipv4Addr> {
self.get_local_inet().map(|inet| inet.address())
}
pub fn get_local_inet(&self) -> Option<Ipv4Inet> {
if self.is_smoltcp_enabled() {
Some(Ipv4Inet::new(Ipv4Addr::new(192, 88, 99, 254), 24).unwrap())
Some(Ipv4Addr::new(192, 88, 99, 254))
} else {
self.global_ctx.get_ipv4().as_ref().cloned()
self.global_ctx
.get_ipv4()
.as_ref()
.map(cidr::Ipv4Inet::address)
}
}
@@ -791,8 +787,9 @@ impl<C: NatDstConnector> TcpProxy<C> {
.load(std::sync::atomic::Ordering::Relaxed)
}
pub fn get_fake_local_ipv4(local_ip: &Ipv4Inet) -> Ipv4Addr {
local_ip.first_address()
pub fn get_fake_local_ipv4(local_ip: Ipv4Addr) -> Ipv4Addr {
let octets = local_ip.octets();
Ipv4Addr::new(octets[0], octets[1], octets[2], 0)
}
async fn try_handle_peer_packet(&self, packet: &mut ZCPacket) -> Option<()> {
@@ -803,8 +800,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
return None;
}
let ipv4_inet = self.get_local_inet()?;
let ipv4_addr = ipv4_inet.address();
let ipv4_addr = self.get_local_ip()?;
let hdr = packet.peer_manager_header().unwrap().clone();
if hdr.packet_type != PacketType::Data as u8 || hdr.is_no_proxy() {
@@ -853,7 +849,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
let mut ip_packet = MutableIpv4Packet::new(payload_bytes).unwrap();
if !self.is_smoltcp_enabled() && source_ip == ipv4_addr {
// modify the source so the response packet can be handled by tun device
ip_packet.set_source(Self::get_fake_local_ipv4(&ipv4_inet));
ip_packet.set_source(Self::get_fake_local_ipv4(ipv4_addr));
}
ip_packet.set_destination(ipv4_addr);
let source = ip_packet.get_source();

View File

@@ -20,7 +20,7 @@ use smoltcp::{
time::{Duration, Instant},
wire::{HardwareAddress, IpAddress, IpCidr},
};
pub use socket::{TcpListener, TcpStream, UdpSocket};
pub use socket::{TcpListener, TcpStream};
pub use socket_allocator::BufferSize;
use tokio::sync::Notify;
@@ -158,13 +158,6 @@ impl Net {
)
.await
}
/// This function will create a new UDP socket and attempt to bind it to the `addr` provided.
pub async fn udp_bind(&self, addr: SocketAddr) -> io::Result<UdpSocket> {
let addr = self.set_address(addr);
UdpSocket::new(self.reactor.clone(), addr.into()).await
}
fn set_address(&self, mut addr: SocketAddr) -> SocketAddr {
if addr.ip().is_unspecified() {
addr.set_ip(match self.ip_addr.address() {

View File

@@ -2,7 +2,6 @@ use super::{reactor::Reactor, socket_allocator::SocketHandle};
use futures::future::{self, poll_fn};
use futures::{ready, Stream};
pub use smoltcp::socket::tcp;
use smoltcp::socket::udp;
use smoltcp::wire::{IpAddress, IpEndpoint};
use std::mem::replace;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
@@ -248,86 +247,3 @@ impl AsyncWrite for TcpStream {
Poll::Pending
}
}
/// A UDP socket backed by a smoltcp socket inside the reactor's socket set.
pub struct UdpSocket {
    // Handle into the shared socket set; dropping it releases the slot.
    handle: SocketHandle,
    // Reactor that owns the socket set and drives smoltcp polling.
    reactor: Arc<Reactor>,
    // Address captured at bind time; returned by `local_addr()`.
    local_addr: SocketAddr,
}
impl UdpSocket {
    /// Allocate a UDP socket in the reactor's socket set and bind it to
    /// `local_endpoint`.
    pub(super) async fn new(
        reactor: Arc<Reactor>,
        local_endpoint: IpEndpoint,
    ) -> io::Result<UdpSocket> {
        let handle = reactor.socket_allocator().new_udp_socket();
        {
            // Scope the socket guard so it is released before building Self.
            let mut socket = reactor.get_socket::<udp::Socket>(*handle);
            socket.bind(local_endpoint).map_err(map_err)?;
        }
        let local_addr = ep2sa(&local_endpoint);
        Ok(UdpSocket {
            handle,
            reactor,
            local_addr,
        })
    }

    /// Note that on multiple calls to a poll_* method in the send direction,
    /// only the Waker from the Context passed to the most recent call will be
    /// scheduled to receive a wakeup.
    pub fn poll_send_to(
        &self,
        cx: &mut Context<'_>,
        buf: &[u8],
        target: SocketAddr,
    ) -> Poll<io::Result<usize>> {
        let mut socket = self.reactor.get_socket::<udp::Socket>(*self.handle);
        let target_ip: IpEndpoint = target.into();
        match socket.send_slice(buf, target_ip) {
            // the buffer is full — fall through to waker registration below
            Err(udp::SendError::BufferFull) => {}
            r => {
                r.map_err(map_err)?;
                // Kick the reactor now that a datagram is queued.
                self.reactor.notify();
                // Datagram semantics: the whole buffer was queued.
                return Poll::Ready(Ok(buf.len()));
            }
        }
        socket.register_send_waker(cx.waker());
        Poll::Pending
    }

    /// See note on `poll_send_to`
    pub async fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
        poll_fn(|cx| self.poll_send_to(cx, buf, target)).await
    }

    /// Note that on multiple calls to a poll_* method in the recv direction,
    /// only the Waker from the Context passed to the most recent call will be
    /// scheduled to receive a wakeup.
    pub fn poll_recv_from(
        &self,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<io::Result<(usize, SocketAddr)>> {
        let mut socket = self.reactor.get_socket::<udp::Socket>(*self.handle);
        match socket.recv_slice(buf) {
            // the buffer is empty — fall through to waker registration below
            Err(udp::RecvError::Exhausted) => {}
            r => {
                let (size, metadata) = r.map_err(map_err)?;
                self.reactor.notify();
                return Poll::Ready(Ok((size, ep2sa(&metadata.endpoint))));
            }
        }
        socket.register_recv_waker(cx.waker());
        Poll::Pending
    }

    /// See note on `poll_recv_from`
    pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
        poll_fn(|cx| self.poll_recv_from(cx, buf)).await
    }

    /// Address captured at bind time; never changes afterwards, so this is
    /// infallible in practice (Result kept for API symmetry with std).
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        Ok(self.local_addr)
    }
}

View File

@@ -1,7 +1,7 @@
use parking_lot::Mutex;
use smoltcp::{
iface::{SocketHandle as InnerSocketHandle, SocketSet},
socket::{tcp, udp},
socket::tcp,
time::Duration,
};
use std::{
@@ -14,11 +14,6 @@ use std::{
pub struct BufferSize {
pub tcp_rx_size: usize,
pub tcp_tx_size: usize,
pub udp_rx_size: usize,
pub udp_tx_size: usize,
pub udp_rx_meta_size: usize,
pub udp_tx_meta_size: usize,
}
impl Default for BufferSize {
@@ -26,11 +21,6 @@ impl Default for BufferSize {
BufferSize {
tcp_rx_size: 8192,
tcp_tx_size: 8192,
udp_rx_size: 8192,
udp_tx_size: 8192,
udp_rx_meta_size: 32,
udp_tx_meta_size: 32,
}
}
}
@@ -69,26 +59,6 @@ impl SocketAlloctor {
tcp
}
/// Allocate a fresh UDP socket in the shared socket set and wrap the raw
/// handle so the slot is released when the `SocketHandle` is dropped.
pub fn new_udp_socket(&self) -> SocketHandle {
    let inner = {
        let mut sockets = self.sockets.lock();
        sockets.add(self.alloc_udp_socket())
    };
    SocketHandle::new(inner, self.sockets.clone())
}
/// Build an unbound smoltcp UDP socket using the configured buffer sizes.
///
/// Each `udp::PacketBuffer` pairs a metadata ring (one slot per queued
/// datagram) with a payload byte buffer.
fn alloc_udp_socket(&self) -> udp::Socket<'static> {
    let rx_buffer = udp::PacketBuffer::new(
        vec![udp::PacketMetadata::EMPTY; self.buffer_size.udp_rx_meta_size],
        vec![0; self.buffer_size.udp_rx_size],
    );
    let tx_buffer = udp::PacketBuffer::new(
        vec![udp::PacketMetadata::EMPTY; self.buffer_size.udp_tx_meta_size],
        vec![0; self.buffer_size.udp_tx_size],
    );
    // Return the socket directly instead of `let udp = ...; udp`
    // (clippy::let_and_return).
    udp::Socket::new(rx_buffer, tx_buffer)
}
}
pub struct SocketHandle(InnerSocketHandle, SharedSocketSet);

View File

@@ -1,104 +0,0 @@
use std::{sync::Arc, time::Duration};
use tokio::task::JoinSet;
use crate::{
peers::peer_manager::PeerManager,
proto::{
cli::Route,
common::Void,
magic_dns::{
HandshakeRequest, MagicDnsServerRpc, MagicDnsServerRpcClientFactory,
UpdateDnsRecordRequest,
},
rpc_impl::standalone::StandAloneClient,
rpc_types::controller::BaseController,
},
tunnel::tcp::TcpTunnelConnector,
};
use super::{DEFAULT_ET_DNS_ZONE, MAGIC_DNS_INSTANCE_ADDR};
/// Client side of the machine-local magic DNS service: connects to the local
/// magic DNS server over TCP RPC and keeps it fed with route records.
pub struct MagicDnsClientInstance {
    // Owns the RPC transport; awaited in `run_and_wait` to detect disconnect.
    rpc_client: StandAloneClient<TcpTunnelConnector>,
    // Taken (`Option::take`) exactly once when the update task is spawned.
    rpc_stub: Option<Box<dyn MagicDnsServerRpc<Controller = BaseController> + Send>>,
    peer_mgr: Arc<PeerManager>,
    tasks: JoinSet<()>,
}
impl MagicDnsClientInstance {
pub async fn new(peer_mgr: Arc<PeerManager>) -> Result<Self, anyhow::Error> {
let tcp_connector = TcpTunnelConnector::new(MAGIC_DNS_INSTANCE_ADDR.parse().unwrap());
let mut rpc_client = StandAloneClient::new(tcp_connector);
let rpc_stub = rpc_client
.scoped_client::<MagicDnsServerRpcClientFactory<BaseController>>("".to_string())
.await?;
Ok(MagicDnsClientInstance {
rpc_client,
rpc_stub: Some(rpc_stub),
peer_mgr,
tasks: JoinSet::new(),
})
}
async fn update_dns_task(
peer_mgr: Arc<PeerManager>,
rpc_stub: Box<dyn MagicDnsServerRpc<Controller = BaseController> + Send>,
) -> Result<(), anyhow::Error> {
let mut prev_last_update = None;
rpc_stub
.handshake(BaseController::default(), HandshakeRequest::default())
.await?;
loop {
rpc_stub
.heartbeat(BaseController::default(), Void::default())
.await?;
let last_update = peer_mgr.get_route_peer_info_last_update_time().await;
if Some(last_update) == prev_last_update {
tokio::time::sleep(Duration::from_millis(500)).await;
continue;
}
prev_last_update = Some(last_update);
let mut routes = peer_mgr.list_routes().await;
// add self as a route
let ctx = peer_mgr.get_global_ctx();
routes.push(Route {
hostname: ctx.get_hostname(),
ipv4_addr: ctx.get_ipv4().map(Into::into),
..Default::default()
});
let req = UpdateDnsRecordRequest {
routes,
zone: DEFAULT_ET_DNS_ZONE.to_string(),
};
tracing::debug!(
"MagicDnsClientInstance::update_dns_task: update dns records: {:?}",
req
);
rpc_stub
.update_dns_record(BaseController::default(), req)
.await?;
}
}
pub async fn run_and_wait(&mut self) {
let rpc_stub = self.rpc_stub.take().unwrap();
let peer_mgr = self.peer_mgr.clone();
self.tasks.spawn(async move {
let ret = Self::update_dns_task(peer_mgr, rpc_stub).await;
if let Err(e) = ret {
tracing::error!("MagicDnsServerInstanceData::run_and_wait: {:?}", e);
}
});
tokio::select! {
_ = self.tasks.join_next() => {
tracing::warn!("MagicDnsServerInstanceData::run_and_wait: dns record update task exited");
}
_ = self.rpc_client.wait() => {
tracing::warn!("MagicDnsServerInstanceData::run_and_wait: rpc client exited");
}
}
}
}

View File

@@ -1,193 +0,0 @@
use hickory_proto::rr;
use hickory_proto::rr::RData;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr};
use std::str::FromStr;
use std::time::Duration;
/// Top-level DNS server configuration, deserialized from TOML.
#[derive(Serialize, Deserialize, Debug, Clone, derive_builder::Builder)]
pub struct RunConfig {
    // Listener addresses (TCP/UDP, each optional).
    general: GeneralConfig,
    // Statically served zones: domain -> records.
    // NOTE(review): unlike the field below, this has no #[serde(default)],
    // so TOML without a `zones` table fails to parse — confirm intended.
    #[builder(default = HashMap::new())]
    zones: Zone,
    // Upstream nameservers that must NOT be used for forwarding.
    #[builder(default = Vec::new())]
    #[serde(default)]
    excluded_forward_nameservers: Vec<IpAddr>,
}
impl RunConfig {
    /// The listener settings.
    pub fn general(&self) -> &GeneralConfig {
        &self.general
    }

    /// The statically configured zones (domain -> records).
    pub fn zones(&self) -> &Zone {
        &self.zones
    }

    /// Nameservers excluded from the forwarding pool.
    pub fn excluded_forward_nameservers(&self) -> &Vec<IpAddr> {
        &self.excluded_forward_nameservers
    }
}
/// Listener configuration; each listener is optional.
#[derive(Serialize, Deserialize, Debug, Clone, derive_builder::Builder)]
pub struct GeneralConfig {
    // e.g. "127.0.0.1:5300"; None disables the TCP listener.
    #[builder(setter(into, strip_option), default = None)]
    listen_tcp: Option<String>,
    // e.g. "127.0.0.1:5353"; None disables the UDP listener.
    #[builder(setter(into, strip_option), default = None)]
    listen_udp: Option<String>,
}
impl GeneralConfig {
    /// TCP listen address, if configured.
    pub fn listen_tcp(&self) -> &Option<String> {
        &self.listen_tcp
    }

    /// UDP listen address, if configured.
    pub fn listen_udp(&self) -> &Option<String> {
        &self.listen_udp
    }
}
pub type Zone = HashMap<String, Vec<Record>>; // domain -> records
// Alias of hickory's record type so TOML "type" values ("A", "SOA", ...)
// deserialize directly via its serde impl.
pub type RecordType = rr::RecordType;
/// One DNS record as written in the config file.
#[derive(Serialize, Deserialize, Debug, Clone, derive_builder::Builder)]
pub struct Record {
    // TOML key is "type" (a Rust keyword, hence the rename).
    #[serde(rename = "type")]
    rr_type: RecordType,
    name: String,
    // Textual record data; format depends on `rr_type` (see the TryFrom impl).
    value: String,
    // Parsed from humantime strings like "60s".
    #[serde(with = "humantime_serde")]
    ttl: Duration,
}
impl Record {
    /// Parse the configured `name` into a hickory domain name.
    fn name(&self) -> anyhow::Result<rr::Name> {
        let name = rr::Name::from_str(self.name.as_str())?;
        Ok(name)
    }

    /// The record type. `rr::RecordType` is `Copy`, so the previous
    /// `self.rr_type.clone().into()` was a no-op clone plus an identity
    /// conversion (clippy::clone_on_copy).
    fn rr_type(&self) -> rr::RecordType {
        self.rr_type
    }
}
impl TryFrom<Record> for rr::Record {
    type Error = anyhow::Error;

    /// Owned-value conversion; delegates to the borrowing
    /// `TryFrom<&Record>` implementation.
    fn try_from(value: Record) -> Result<Self, Self::Error> {
        rr::Record::try_from(&value)
    }
}
impl TryFrom<&Record> for rr::Record {
    type Error = anyhow::Error;

    /// Convert a config record into a hickory wire record.
    ///
    /// Supports `A` (value is a dotted IPv4 address) and `SOA` (value is the
    /// seven whitespace-separated SOA fields). Other record types now return
    /// an error instead of panicking (was `todo!()`).
    fn try_from(value: &Record) -> Result<Self, Self::Error> {
        let name = value.name()?;
        let mut record = Self::update0(name, value.ttl.as_secs() as u32, value.rr_type());
        record.set_dns_class(rr::DNSClass::IN);
        match value.rr_type {
            RecordType::A => {
                let addr: Ipv4Addr = value.value.parse()?;
                record.set_data(RData::A(rr::rdata::a::A(addr)));
            }
            RecordType::SOA => {
                // mname rname serial refresh retry expire minimum
                let soa = value.value.split_whitespace().collect::<Vec<_>>();
                if soa.len() != 7 {
                    return Err(anyhow::anyhow!("invalid SOA record"));
                }
                let mname = rr::Name::from_str(soa[0])?;
                let rname = rr::Name::from_str(soa[1])?;
                let serial: u32 = soa[2].parse()?;
                let refresh: u32 = soa[3].parse()?;
                let retry: u32 = soa[4].parse()?;
                let expire: u32 = soa[5].parse()?;
                let minimum: u32 = soa[6].parse()?;
                record.set_data(RData::SOA(rr::rdata::soa::SOA::new(
                    mname,
                    rname,
                    serial,
                    // SOA timers are i32 in hickory's API; propagate the
                    // conversion error instead of unwrap-panicking on
                    // values above i32::MAX.
                    refresh.try_into()?,
                    retry.try_into()?,
                    expire.try_into()?,
                    minimum,
                )));
            }
            other => {
                return Err(anyhow::anyhow!(
                    "unsupported record type in config: {:?}",
                    other
                ))
            }
        }
        Ok(record)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use anyhow::anyhow;

    /// Parse a small TOML config and verify both zones round-trip.
    #[tokio::test]
    async fn it_works() -> anyhow::Result<()> {
        let text = r#"
[general]
listen_tcp = "127.0.0.1:5300"
listen_udp = "127.0.0.1:5353"
[[zones."et.internal"]]
type = "A"
name = "www"
value = "123.123.123.123"
ttl = "60s"
[[zones."et.top"]]
type = "A"
name = "@"
value = "100.100.100.100"
ttl = "61s"
"#;
        let config = toml::from_str::<RunConfig>(text)?;
        assert_eq!(
            config.general.listen_tcp().clone().unwrap(),
            "127.0.0.1:5300"
        );
        assert_eq!(
            config.general.listen_udp().clone().unwrap(),
            "127.0.0.1:5353"
        );
        assert_eq!(config.zones.len(), 2);

        let (domain, records) = config
            .zones
            .get_key_value("et.internal")
            // `ok_or_else` builds the error lazily; the previous
            // `map_or(Err(anyhow!(..)), Ok)` built it on every call.
            .ok_or_else(|| anyhow!("parse error"))?;
        assert_eq!(domain, "et.internal");
        assert_eq!(records.len(), 1);
        let record = &records[0];
        assert_eq!(record.rr_type, RecordType::A);
        assert_eq!(record.name, "www");
        assert_eq!(record.value, "123.123.123.123");
        assert_eq!(record.ttl.as_secs(), 60);

        let (domain, records) = config
            .zones
            .get_key_value("et.top")
            .ok_or_else(|| anyhow!("parse error"))?;
        assert_eq!(domain, "et.top");
        assert_eq!(records.len(), 1);
        let record = &records[0];
        assert_eq!(record.rr_type, RecordType::A);
        assert_eq!(record.name, "@");
        assert_eq!(record.value, "100.100.100.100");
        assert_eq!(record.ttl.as_secs(), 61);
        Ok(())
    }
}

View File

@@ -1,15 +0,0 @@
// This module is copy and modified from https://github.com/fanyang89/libdns
pub(crate) mod config;
pub(crate) mod server;

pub mod client_instance;
pub mod runner;
pub mod server_instance;
pub mod system_config;

#[cfg(test)]
mod tests;

// RPC endpoint the magic DNS client dials; the single server instance on
// this machine listens here (see client_instance / server_instance).
pub static MAGIC_DNS_INSTANCE_ADDR: &str = "tcp://127.0.0.1:49813";
// Virtual IP the magic DNS server answers on (packets to it are intercepted
// off the tun device).
pub static MAGIC_DNS_FAKE_IP: &str = "100.100.100.101";
// Default DNS zone for peer hostnames.
pub static DEFAULT_ET_DNS_ZONE: &str = "et.net.";

View File

@@ -1,93 +0,0 @@
use cidr::Ipv4Inet;
use tokio_util::sync::CancellationToken;
use crate::peers::peer_manager::PeerManager;
use std::{net::Ipv4Addr, sync::Arc, time::Duration};
use super::{client_instance::MagicDnsClientInstance, server_instance::MagicDnsServerInstance};
// NOTE(review): shadows the module-level `super::DEFAULT_ET_DNS_ZONE` and
// appears unused within this file — confirm before removing.
static DEFAULT_ET_DNS_ZONE: &str = "et.net.";
/// Supervises the machine-local magic DNS service: tries to become the
/// server, and always runs a client that feeds routes to whichever server
/// instance won.
pub struct DnsRunner {
    client: Option<MagicDnsClientInstance>,
    server: Option<MagicDnsServerInstance>,
    peer_mgr: Arc<PeerManager>,
    // Name of the local tun device, if any (passed to the server instance).
    tun_dev: Option<String>,
    tun_inet: Ipv4Inet,
    // Virtual IP the DNS service answers on.
    fake_ip: Ipv4Addr,
}
impl DnsRunner {
    /// Build an idle runner; nothing starts until `run` is called.
    pub fn new(
        peer_mgr: Arc<PeerManager>,
        tun_dev: Option<String>,
        tun_inet: Ipv4Inet,
        fake_ip: Ipv4Addr,
    ) -> Self {
        Self {
            client: None,
            server: None,
            peer_mgr,
            tun_dev,
            tun_inet,
            fake_ip,
        }
    }

    /// Tear down server-side OS state (if we were the server) and drop both
    /// instances.
    async fn clean_env(&mut self) {
        if let Some(server) = self.server.take() {
            server.clean_env().await;
        }
        self.client.take();
    }

    /// One supervision round: try to start a server (best effort — losing
    /// the race to another instance is normal), then run a client until it
    /// exits. Always returns `Err`, since the client loop only ends on
    /// failure.
    async fn run_once(&mut self) -> anyhow::Result<()> {
        // try server first
        match MagicDnsServerInstance::new(
            self.peer_mgr.clone(),
            self.tun_dev.clone(),
            self.tun_inet,
            self.fake_ip,
        )
        .await
        {
            Ok(server) => {
                self.server = Some(server);
                tracing::info!("DnsRunner::run_once: server started");
            }
            Err(e) => {
                tracing::error!("DnsRunner::run_once: {:?}", e);
            }
        }

        // every runner must run a client
        let client = MagicDnsClientInstance::new(self.peer_mgr.clone()).await?;
        self.client = Some(client);
        self.client.as_mut().unwrap().run_and_wait().await;
        // Tail expression instead of `return Err(...);` at end of block.
        Err(anyhow::anyhow!("Client instance exit"))
    }

    /// Supervision loop: restart on failure, clean up and exit on
    /// cancellation. (Parameter typo `canel_token` fixed.)
    pub async fn run(&mut self, cancel_token: CancellationToken) {
        loop {
            tracing::info!("DnsRunner::run: start");
            tokio::select! {
                _ = cancel_token.cancelled() => {
                    self.clean_env().await;
                    tracing::info!("DnsRunner::run: cancelled");
                    return;
                }
                ret = self.run_once() => {
                    self.clean_env().await;
                    if let Err(e) = ret {
                        tracing::error!("DnsRunner::run: {:?}", e);
                    } else {
                        tracing::info!("DnsRunner::run: unexpected exit, server may be down");
                    }
                    // Brief backoff before retrying.
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }
            }
        }
    }
}

View File

@@ -1,338 +0,0 @@
use anyhow::{Context, Result};
use hickory_proto::op::Edns;
use hickory_proto::rr;
use hickory_proto::rr::LowerName;
use hickory_resolver::config::ResolverOpts;
use hickory_resolver::name_server::TokioConnectionProvider;
use hickory_resolver::system_conf::read_system_conf;
use hickory_server::authority::{AuthorityObject, Catalog, ZoneType};
use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use hickory_server::store::forwarder::ForwardConfig;
use hickory_server::store::{forwarder::ForwardAuthority, in_memory::InMemoryAuthority};
use hickory_server::ServerFuture;
use std::io;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::{TcpListener, UdpSocket};
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use tokio::task::JoinSet;
use crate::common::dns::get_default_resolver_config;
use super::config::{GeneralConfig, Record, RunConfig};
/// hickory-based DNS server: a catalog of authorities driven by a
/// `ServerFuture`, shared behind an `RwLock` so zones can be swapped while
/// serving.
pub struct Server {
    server: ServerFuture<CatalogRequestHandler>,
    catalog: Arc<RwLock<Catalog>>,
    general_config: GeneralConfig,
    // Actual bound addresses; populated by `run()` (None until then).
    udp_local_addr: Option<SocketAddr>,
    tcp_local_addr: Option<SocketAddr>,
    tasks: JoinSet<()>,
}
// `RequestHandler` adapter that lets the catalog be replaced at runtime
// behind an `RwLock`.
struct CatalogRequestHandler {
    catalog: Arc<RwLock<Catalog>>,
}
impl CatalogRequestHandler {
    /// Wrap a shared catalog so it can serve hickory requests.
    /// (Dead commented-out recursor experiment removed.)
    fn new(catalog: Arc<RwLock<Catalog>>) -> CatalogRequestHandler {
        Self { catalog }
    }
}
#[async_trait::async_trait]
impl RequestHandler for CatalogRequestHandler {
    /// Delegate to the current catalog under a read lock, so lookups can
    /// proceed concurrently with each other.
    async fn handle_request<R: ResponseHandler>(
        &self,
        request: &Request,
        response_handle: R,
    ) -> ResponseInfo {
        let catalog = self.catalog.read().await;
        catalog.handle_request(request, response_handle).await
    }
}
/// Build an in-memory authoritative zone for `domain` from config records.
pub fn build_authority(domain: &str, records: &[Record]) -> Result<InMemoryAuthority> {
    let zone = rr::Name::from_str(domain)?;
    // `zone` is only needed here — move it instead of cloning (redundant_clone).
    let mut authority = InMemoryAuthority::empty(zone, ZoneType::Primary, false);
    for record in records.iter() {
        let r = record.try_into()?;
        authority.upsert_mut(r, 0);
    }
    Ok(authority)
}
impl Server {
    /// Build a server from `config`, panicking on invalid configuration
    /// (keeps the existing infallible constructor interface; the fallible
    /// path is `try_new`).
    pub fn new(config: RunConfig) -> Self {
        Self::try_new(config).unwrap()
    }

    /// Build the catalog: one in-memory authority per configured zone, plus
    /// a forwarding authority at the root (".") using the system resolvers
    /// minus any excluded nameservers.
    fn try_new(config: RunConfig) -> Result<Self> {
        let mut catalog = Catalog::new();
        for (domain, records) in config.zones().iter() {
            let zone = rr::Name::from_str(domain.as_str())?;
            // Typo fixed (`authroty`) and redundant `zone.clone()` dropped —
            // `zone` is moved into the upsert key.
            let authority = build_authority(domain, records)?;
            catalog.upsert(zone.into(), vec![Arc::new(authority)]);
        }

        // use forwarder authority for the root zone
        let system_conf =
            read_system_conf().unwrap_or((get_default_resolver_config(), ResolverOpts::default()));
        let forward_config = ForwardConfig {
            name_servers: system_conf
                .0
                .name_servers()
                .iter()
                .cloned()
                .filter(|x| {
                    !config
                        .excluded_forward_nameservers()
                        .contains(&x.socket_addr.ip())
                })
                .collect::<Vec<_>>()
                .into(),
            options: Some(system_conf.1),
        };
        let auth = ForwardAuthority::builder_with_config(
            forward_config,
            TokioConnectionProvider::default(),
        )
        .build()
        // The forward-authority builder does not fail for this configuration.
        .unwrap();
        catalog.upsert(rr::Name::from_str(".")?.into(), vec![Arc::new(auth)]);

        let catalog = Arc::new(RwLock::new(catalog));
        let handler = CatalogRequestHandler::new(catalog.clone());
        let server = ServerFuture::new(handler);
        Ok(Self {
            server,
            catalog,
            general_config: config.general().clone(),
            udp_local_addr: None,
            tcp_local_addr: None,
            tasks: JoinSet::new(),
        })
    }

    /// Local address of the bound UDP socket, once `run` has bound one.
    pub fn udp_local_addr(&self) -> Option<SocketAddr> {
        self.udp_local_addr
    }

    /// Local address of the bound TCP listener, once `run` has bound one.
    pub fn tcp_local_addr(&self) -> Option<SocketAddr> {
        self.tcp_local_addr
    }

    /// Bind a reuse-addr, non-blocking UDP socket on `address` and register
    /// it with the DNS server. Returns the actual bound address (useful for
    /// ":0" ports).
    pub async fn register_udp_socket(&mut self, address: String) -> Result<SocketAddr> {
        let bind_addr = SocketAddr::from_str(&address)
            .with_context(|| format!("DNS Server failed to parse address {}", address))?;
        let socket = socket2::Socket::new(
            socket2::Domain::IPV4,
            socket2::Type::DGRAM,
            Some(socket2::Protocol::UDP),
        )
        .with_context(|| {
            // `address` is already a String; the old `address.to_string()`
            // calls inside these format!s were redundant allocations.
            format!(
                "DNS Server failed to create UDP socket for address {}",
                address
            )
        })?;
        socket2::SockRef::from(&socket)
            .set_reuse_address(true)
            .with_context(|| {
                format!(
                    "DNS Server failed to set reuse address on socket {}",
                    address
                )
            })?;
        socket.bind(&bind_addr.into()).with_context(|| {
            format!("DNS Server failed to bind socket to address {}", bind_addr)
        })?;
        socket
            .set_nonblocking(true)
            // `.context(...)` for static messages; `format!` with no
            // arguments was clippy::useless_format.
            .context("DNS Server failed to set socket to non-blocking")?;
        let socket = UdpSocket::from_std(socket.into()).with_context(|| {
            format!(
                "DNS Server failed to convert socket to UdpSocket for address {}",
                address
            )
        })?;
        let local_addr = socket
            .local_addr()
            .context("DNS Server failed to get local address")?;
        self.server.register_socket(socket);
        Ok(local_addr)
    }

    /// Start listening on the configured TCP/UDP addresses (each optional).
    pub async fn run(&mut self) -> Result<()> {
        if let Some(address) = self.general_config.listen_tcp() {
            let tcp_listener = TcpListener::bind(address.clone())
                .await
                .with_context(|| format!("DNS Server failed to bind TCP address {}", address))?;
            self.tcp_local_addr = Some(tcp_listener.local_addr()?);
            self.server
                .register_listener(tcp_listener, Duration::from_secs(5));
        }
        if let Some(address) = self.general_config.listen_udp() {
            let local_addr = self.register_udp_socket(address.clone()).await?;
            self.udp_local_addr = Some(local_addr);
        };
        Ok(())
    }

    /// Gracefully stop serving.
    pub async fn shutdown(&mut self) -> Result<()> {
        self.server.shutdown_gracefully().await?;
        Ok(())
    }

    /// Replace (or add) the authority set for `name` in the live catalog.
    pub async fn upsert(&self, name: LowerName, authority: Arc<dyn AuthorityObject>) {
        self.catalog.write().await.upsert(name, vec![authority]);
    }

    /// Remove the authority set for `name`, returning it if present.
    pub async fn remove(&self, name: &LowerName) -> Option<Vec<Arc<dyn AuthorityObject>>> {
        self.catalog.write().await.remove(name)
    }

    /// Forward a dynamic-update request to the catalog (write lock).
    pub async fn update<R: ResponseHandler>(
        &self,
        update: &Request,
        response_edns: Option<Edns>,
        response_handle: R,
    ) -> io::Result<ResponseInfo> {
        self.catalog
            .write()
            .await
            .update(update, response_edns, response_handle)
            .await
    }

    /// Whether the catalog has an authority covering `name`.
    pub async fn contains(&self, name: &LowerName) -> bool {
        self.catalog.read().await.contains(name)
    }

    /// Forward a lookup request to the catalog (read lock).
    pub async fn lookup<R: ResponseHandler>(
        &self,
        request: &Request,
        response_edns: Option<Edns>,
        response_handle: R,
    ) -> ResponseInfo {
        self.catalog
            .read()
            .await
            .lookup(request, response_edns, response_handle)
            .await
    }

    /// Read access to the underlying catalog.
    pub async fn read_catalog(&self) -> RwLockReadGuard<'_, Catalog> {
        self.catalog.read().await
    }

    /// Write access to the underlying catalog.
    pub async fn write_catalog(&self) -> RwLockWriteGuard<'_, Catalog> {
        self.catalog.write().await
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::instance::dns_server::config::{
        GeneralConfigBuilder, RecordBuilder, RecordType, RunConfigBuilder,
    };
    use anyhow::Result;
    use hickory_client::client::{Client, ClientHandle};
    use hickory_proto::rr;
    use hickory_proto::runtime::TokioRuntimeProvider;
    use hickory_proto::udp::UdpClientStream;
    use maplit::hashmap;
    use std::time::Duration;

    /// A server with no listeners configured still starts and shuts down
    /// cleanly.
    #[tokio::test]
    async fn it_works() -> Result<()> {
        let mut server = Server::new(
            RunConfigBuilder::default()
                .general(GeneralConfigBuilder::default().build()?)
                .build()?,
        );
        server.run().await?;
        server.shutdown().await?;
        Ok(())
    }

    /// End-to-end: serve a small zone over a real UDP socket and query it
    /// with a hickory client.
    #[tokio::test]
    async fn can_resolve_records() -> Result<()> {
        let configured_record = RecordBuilder::default()
            .rr_type(RecordType::A)
            .name("www.et.internal.".to_string())
            .value("123.123.123.123".to_string())
            .ttl(Duration::from_secs(60))
            .build()?;
        // Non-ASCII label — presumably exercises IDN handling in the zone
        // store; it is stored but not queried below.
        let configured_record2 = RecordBuilder::default()
            .rr_type(RecordType::A)
            .name("中文.et.internal.".to_string())
            .value("123.123.123.123".to_string())
            .ttl(Duration::from_secs(60))
            .build()?;
        let soa_record = RecordBuilder::default()
            .rr_type(RecordType::SOA)
            .name("et.internal.".to_string())
            .value(
                "ns.et.internal. hostmaster.et.internal. 2023101001 7200 3600 1209600 86400"
                    .to_string(),
            )
            .ttl(Duration::from_secs(60))
            .build()?;
        // Port 0: let the OS pick; read the real port back via
        // `udp_local_addr` after `run()`.
        let config = RunConfigBuilder::default()
            .general(
                GeneralConfigBuilder::default()
                    .listen_udp("127.0.0.1:0")
                    .build()?,
            )
            .zones(hashmap! {
                "et.internal.".to_string() => vec![configured_record.clone(), soa_record.clone(), configured_record2.clone()],
            })
            .build()?;
        let mut server = Server::new(config);
        server.run().await?;

        let local_addr = server.udp_local_addr().unwrap();
        let stream = UdpClientStream::builder(local_addr, TokioRuntimeProvider::default()).build();
        let (mut client, background) = Client::connect(stream).await?;
        // Spawned to drive the client; dropping the handle later only
        // detaches the task.
        let background_task = tokio::spawn(background);

        let response = client
            .query(
                rr::Name::from_str("www.et.internal")?,
                rr::DNSClass::IN,
                rr::RecordType::A,
            )
            .await?;
        drop(background_task);
        println!("Response: {:?}", response);
        assert_eq!(response.answers().len(), 1);
        let expected_record: rr::Record = configured_record.try_into()?;
        assert_eq!(response.answers().first().unwrap(), &expected_record);
        server.shutdown().await?;
        Ok(())
    }
}

View File

@@ -1,446 +0,0 @@
// single-instance server in one machine, every easytier instance that has ip address and tun device will try create a server instance.
// magic dns client will connect to this server to update the dns records.
// magic dns server will add the dns server ip address to the tun device, and forward the dns request to the dns server
// magic dns client will establish a long live tcp connection to the magic dns server, and when the server stops or crashes,
// all the clients will exit and let the easytier instance to launch a new server instance.
use std::{collections::BTreeMap, net::Ipv4Addr, str::FromStr, sync::Arc, time::Duration};
use anyhow::Context;
use cidr::Ipv4Inet;
use dashmap::DashMap;
use hickory_proto::rr::LowerName;
use multimap::MultiMap;
use pnet::packet::{
icmp::{self, IcmpTypes, MutableIcmpPacket},
ip::IpNextHeaderProtocols,
ipv4::{self, MutableIpv4Packet},
tcp::{self, MutableTcpPacket},
udp::{self, MutableUdpPacket},
MutablePacket,
};
use crate::{
common::{
ifcfg::{IfConfiger, IfConfiguerTrait},
PeerId,
},
instance::dns_server::{
config::{Record, RecordBuilder, RecordType},
server::build_authority,
DEFAULT_ET_DNS_ZONE,
},
peers::{peer_manager::PeerManager, NicPacketFilter},
proto::{
cli::Route,
common::{TunnelInfo, Void},
magic_dns::{
dns_record::{self},
DnsRecord, DnsRecordA, DnsRecordList, GetDnsRecordResponse, HandshakeRequest,
HandshakeResponse, MagicDnsServerRpc, MagicDnsServerRpcServer, UpdateDnsRecordRequest,
},
rpc_impl::standalone::{RpcServerHook, StandAloneServer},
rpc_types::controller::{BaseController, Controller},
},
tunnel::{packet_def::ZCPacket, tcp::TcpTunnelListener},
};
use super::{
config::{GeneralConfigBuilder, RunConfigBuilder},
server::Server,
system_config::{OSConfig, SystemConfig},
MAGIC_DNS_INSTANCE_ADDR,
};
// Identifier under which this filter registers in the NIC packet pipeline.
static NIC_PIPELINE_NAME: &str = "magic_dns_server";

/// Machine-wide singleton state for the magic DNS server instance.
pub(super) struct MagicDnsServerInstanceData {
    dns_server: Server,
    tun_dev: Option<String>,
    // IP of the local tun device; DNS replies are rewritten toward it.
    tun_ip: Ipv4Addr,
    // Virtual IP the DNS service answers on.
    fake_ip: Ipv4Addr,
    my_peer_id: PeerId,
    // zone -> (tunnel remote addr -> route)
    route_infos: DashMap<String, MultiMap<url::Url, Route>>,
    // Platform DNS configurator (Windows/macOS); `None` where unsupported.
    system_config: Option<Box<dyn SystemConfig>>,
}
impl MagicDnsServerInstanceData {
    /// Rebuild and publish the authority for `zone` from `routes`.
    ///
    /// Every route with a hostname and an IPv4 address becomes an A record
    /// `<hostname>.<zone>` with a 1-second TTL; a synthetic SOA record is
    /// appended so the zone is well-formed.
    pub async fn update_dns_records<'a, T: Iterator<Item = &'a Route>>(
        &self,
        routes: T,
        zone: &str,
    ) -> Result<(), anyhow::Error> {
        let mut records: Vec<Record> = vec![];
        for route in routes {
            if route.hostname.is_empty() {
                continue;
            }
            let Some(ipv4_addr) = route.ipv4_addr.unwrap_or_default().address else {
                continue;
            };
            let record = RecordBuilder::default()
                .rr_type(RecordType::A)
                .name(format!("{}.{}", route.hostname, zone))
                .value(ipv4_addr.to_string())
                .ttl(Duration::from_secs(1))
                .build()?;
            records.push(record);
        }

        let soa_record = RecordBuilder::default()
            .rr_type(RecordType::SOA)
            .name(zone.to_string())
            .value(format!(
                "ns.{} hostmaster.{} 2023101001 7200 3600 1209600 86400",
                zone, zone
            ))
            .ttl(Duration::from_secs(60))
            .build()?;
        records.push(soa_record);

        let authority = build_authority(zone, &records)?;
        self.dns_server
            .upsert(
                // Typo fixed in the user-facing error message
                // ("fomat" -> "format"); static message uses `.context`.
                LowerName::from_str(zone)
                    .context("Invalid zone name, expect format like \"et.net.\"")?,
                Arc::new(authority),
            )
            .await;
        tracing::debug!("Updated DNS records for zone {}: {:?}", zone, records);
        Ok(())
    }

    /// Republish every zone currently tracked in `route_infos`.
    pub async fn update(&self) {
        for item in self.route_infos.iter() {
            let zone = item.key();
            let route_iter = item.value().flat_iter().map(|x| x.1);
            if let Err(e) = self.update_dns_records(route_iter, zone).await {
                tracing::error!("Failed to update DNS records for zone {}: {:?}", zone, e);
            }
        }
    }

    /// Point the OS resolver at the fake DNS IP for `zone`; no-op on
    /// platforms without a `SystemConfig` implementation.
    fn do_system_config(&self, zone: &str) -> Result<(), anyhow::Error> {
        if let Some(c) = &self.system_config {
            c.set_dns(&OSConfig {
                nameservers: vec![self.fake_ip.to_string()],
                search_domains: vec![zone.to_string()],
                match_domains: vec![zone.to_string()],
            })?;
        }
        Ok(())
    }
}
#[async_trait::async_trait]
impl MagicDnsServerRpc for MagicDnsServerInstanceData {
    type Controller = BaseController;

    /// Initial exchange after a client connects; currently returns defaults.
    async fn handshake(
        &self,
        _ctrl: Self::Controller,
        _input: HandshakeRequest,
    ) -> crate::proto::rpc_types::error::Result<HandshakeResponse> {
        Ok(Default::default())
    }

    /// Record the caller's routes under (zone -> tunnel remote addr) and
    /// republish all DNS records.
    async fn update_dns_record(
        &self,
        ctrl: Self::Controller,
        input: UpdateDnsRecordRequest,
    ) -> crate::proto::rpc_types::error::Result<Void> {
        // The tunnel's remote address keys this client's routes so they can
        // be dropped when the client disconnects.
        let Some(tunnel_info) = ctrl.get_tunnel_info() else {
            return Err(anyhow::anyhow!("No tunnel info").into());
        };
        let Some(remote_addr) = &tunnel_info.remote_addr else {
            return Err(anyhow::anyhow!("No remote addr").into());
        };
        let zone = input.zone.clone();
        self.route_infos
            .entry(zone.clone())
            .or_default()
            .insert_many(remote_addr.clone().into(), input.routes);
        self.update().await;
        Ok(Default::default())
    }

    /// Snapshot of all tracked records, keyed by zone.
    async fn get_dns_record(
        &self,
        _ctrl: Self::Controller,
        _input: Void,
    ) -> crate::proto::rpc_types::error::Result<GetDnsRecordResponse> {
        let mut ret = BTreeMap::new();
        for item in self.route_infos.iter() {
            let zone = item.key();
            let routes = item.value();
            let mut dns_records = DnsRecordList::default();
            for route in routes.iter().map(|x| x.1) {
                dns_records.records.push(DnsRecord {
                    record: Some(dns_record::Record::A(DnsRecordA {
                        name: format!("{}.{}", route.hostname, zone),
                        value: route.ipv4_addr.unwrap_or_default().address,
                        ttl: 1,
                    })),
                });
            }
            ret.insert(zone.clone(), dns_records);
        }
        Ok(GetDnsRecordResponse { records: ret })
    }

    /// Keep-alive; clients poll this to detect a dead server.
    async fn heartbeat(
        &self,
        _ctrl: Self::Controller,
        _input: Void,
    ) -> crate::proto::rpc_types::error::Result<Void> {
        Ok(Default::default())
    }
}
#[async_trait::async_trait]
impl NicPacketFilter for MagicDnsServerInstanceData {
    /// Intercept packets leaving the tun device that target the fake DNS IP.
    ///
    /// DNS UDP/TCP packets have their port rewritten to/from the locally
    /// bound DNS server; ICMP echo requests are answered directly. Returns
    /// `true` when the packet was rewritten and redirected back to this peer.
    async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
        let data = zc_packet.mut_payload();
        let mut ip_packet = MutableIpv4Packet::new(data).unwrap();
        if ip_packet.get_version() != 4 || ip_packet.get_destination() != self.fake_ip {
            return false;
        }

        match ip_packet.get_next_level_protocol() {
            IpNextHeaderProtocols::Udp => {
                let Some(dns_udp_addr) = self.dns_server.udp_local_addr() else {
                    return false;
                };
                let Some(mut udp_packet) = MutableUdpPacket::new(ip_packet.payload_mut()) else {
                    return false;
                };
                if udp_packet.get_destination() == 53 {
                    // for dns request
                    udp_packet.set_destination(dns_udp_addr.port());
                } else if udp_packet.get_source() == dns_udp_addr.port() {
                    // for dns response
                    udp_packet.set_source(53);
                } else {
                    return false;
                }
                // Checksum uses the post-rewrite pseudo-header addresses
                // (the IP header is set to fake_ip -> tun_ip below).
                udp_packet.set_checksum(udp::ipv4_checksum(
                    &udp_packet.to_immutable(),
                    &self.fake_ip,
                    &self.tun_ip,
                ));
            }
            IpNextHeaderProtocols::Tcp => {
                let Some(dns_tcp_addr) = self.dns_server.tcp_local_addr() else {
                    return false;
                };
                let Some(mut tcp_packet) = MutableTcpPacket::new(ip_packet.payload_mut()) else {
                    return false;
                };
                if tcp_packet.get_destination() == 53 {
                    // for dns request
                    tcp_packet.set_destination(dns_tcp_addr.port());
                } else if tcp_packet.get_source() == dns_tcp_addr.port() {
                    // for dns response
                    tcp_packet.set_source(53);
                } else {
                    return false;
                }
                tcp_packet.set_checksum(tcp::ipv4_checksum(
                    &tcp_packet.to_immutable(),
                    &self.fake_ip,
                    &self.tun_ip,
                ));
            }
            IpNextHeaderProtocols::Icmp => {
                // Answer pings to the fake DNS IP in place.
                let Some(mut icmp_packet) = MutableIcmpPacket::new(ip_packet.payload_mut()) else {
                    return false;
                };
                if icmp_packet.get_icmp_type() != IcmpTypes::EchoRequest {
                    return false;
                }
                icmp_packet.set_icmp_type(IcmpTypes::EchoReply);
                icmp_packet.set_checksum(icmp::checksum(&icmp_packet.to_immutable()));
            }
            _ => {
                return false;
            }
        }

        // Rewrite the IP header to fake_ip -> tun_ip and address the packet
        // to our own peer so the tun device receives the "response".
        ip_packet.set_source(self.fake_ip);
        ip_packet.set_destination(self.tun_ip);
        ip_packet.set_checksum(ipv4::checksum(&ip_packet.to_immutable()));
        zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.my_peer_id.into();
        true
    }

    /// Pipeline identifier used when (de)registering this filter.
    fn id(&self) -> String {
        NIC_PIPELINE_NAME.to_string()
    }
}
#[async_trait::async_trait]
impl RpcServerHook for MagicDnsServerInstanceData {
async fn on_new_client(&self, tunnel_info: Option<TunnelInfo>) {
println!("New client connected: {:?}", tunnel_info);
}
async fn on_client_disconnected(&self, tunnel_info: Option<TunnelInfo>) {
println!("Client disconnected: {:?}", tunnel_info);
let Some(tunnel_info) = tunnel_info else {
return;
};
let Some(remote_addr) = tunnel_info.remote_addr else {
return;
};
let remote_addr = remote_addr.into();
for mut item in self.route_infos.iter_mut() {
item.value_mut().remove(&remote_addr);
}
self.route_infos.retain(|_, v| !v.is_empty());
self.update().await;
}
}
/// A running magic-DNS instance: the stand-alone RPC endpoint, the shared
/// data used by the packet filter / DNS server, and the handles needed to
/// undo environment changes in `clean_env`.
pub struct MagicDnsServerInstance {
    // Stand-alone RPC server accepting client connections on
    // MAGIC_DNS_INSTANCE_ADDR.
    rpc_server: StandAloneServer<TcpTunnelListener>,
    // Shared state (embedded DNS server, route table, system-config handle).
    pub(super) data: Arc<MagicDnsServerInstanceData>,
    // Kept so clean_env() can detach the NIC packet pipeline.
    peer_mgr: Arc<PeerManager>,
    // TUN subnet; decides whether a /32 host route to fake_ip was added in
    // `new` and must be removed on cleanup.
    tun_inet: Ipv4Inet,
}
/// Returns the platform-specific system DNS configurator, if this platform
/// has one (Windows and macOS); `Ok(None)` on other targets.
///
/// `_tun_name` is only consumed on Windows, where DNS settings are bound to
/// the TUN interface; passing `None` there is an error.
fn get_system_config(
    _tun_name: Option<&str>,
) -> Result<Option<Box<dyn SystemConfig>>, anyhow::Error> {
    #[cfg(target_os = "windows")]
    {
        use super::system_config::windows::WindowsDNSManager;
        let tun_name = _tun_name.ok_or_else(|| anyhow::anyhow!("No tun name"))?;
        return Ok(Some(Box::new(WindowsDNSManager::new(tun_name)?)));
    }
    #[cfg(target_os = "macos")]
    {
        use super::system_config::darwin::DarwinConfigurator;
        return Ok(Some(Box::new(DarwinConfigurator::new())));
    }
    // On windows/macos the cfg blocks above return early, making this line
    // unreachable there — hence the allow.
    #[allow(unreachable_code)]
    Ok(None)
}
impl MagicDnsServerInstance {
    /// Starts a magic-DNS instance: launches the stand-alone RPC server,
    /// starts the embedded DNS server bound to the TUN address, optionally
    /// installs a /32 host route to `fake_ip`, registers the NIC packet
    /// filter, and applies system DNS configuration.
    ///
    /// * `peer_mgr`  - peer manager whose NIC pipeline will be hooked.
    /// * `tun_dev`   - TUN device name, if any; required for the host route.
    /// * `tun_inet`  - local TUN address + subnet.
    /// * `fake_ip`   - the virtual DNS server IP clients will query.
    ///
    /// Errors if any of the servers fail to start, the route cannot be
    /// added, or system DNS configuration fails.
    pub async fn new(
        peer_mgr: Arc<PeerManager>,
        tun_dev: Option<String>,
        tun_inet: Ipv4Inet,
        fake_ip: Ipv4Addr,
    ) -> Result<Self, anyhow::Error> {
        // RPC endpoint on the well-known magic-DNS address (constant parses
        // by construction, hence the unwrap).
        let tcp_listener = TcpTunnelListener::new(MAGIC_DNS_INSTANCE_ADDR.parse().unwrap());
        let mut rpc_server = StandAloneServer::new(tcp_listener);
        rpc_server.serve().await?;
        // Embedded DNS server listens on the TUN address with OS-chosen
        // ports (":0"); the packet filter rewrites port 53 traffic to them.
        let bind_addr = tun_inet.address();
        let dns_config = RunConfigBuilder::default()
            .general(
                GeneralConfigBuilder::default()
                    .listen_udp(format!("{}:0", bind_addr))
                    .listen_tcp(format!("{}:0", bind_addr))
                    .build()
                    .unwrap(),
            )
            // Never forward queries to the fake IP itself — that would loop
            // straight back into this server.
            .excluded_forward_nameservers(vec![fake_ip.into()])
            .build()
            .unwrap();
        let mut dns_server = Server::new(dns_config);
        dns_server.run().await?;
        // If fake_ip lies outside the TUN subnet, add a /32 host route so
        // traffic to it is steered into the TUN device.
        if !tun_inet.contains(&fake_ip) && tun_dev.is_some() {
            // Windows gets an explicit metric; other OSes use the default.
            let cost = if cfg!(target_os = "windows") {
                Some(4)
            } else {
                None
            };
            let ifcfg = IfConfiger {};
            ifcfg
                .add_ipv4_route(tun_dev.as_ref().unwrap(), fake_ip, 32, cost)
                .await?;
        }
        let data = Arc::new(MagicDnsServerInstanceData {
            dns_server,
            tun_dev: tun_dev.clone(),
            tun_ip: tun_inet.address(),
            fake_ip,
            my_peer_id: peer_mgr.my_peer_id(),
            route_infos: DashMap::new(),
            system_config: get_system_config(tun_dev.as_deref())?,
        });
        // Expose the RPC service and hook client connect/disconnect events.
        rpc_server
            .registry()
            .register(MagicDnsServerRpcServer::new(data.clone()), "");
        rpc_server.set_hook(data.clone());
        // Intercept DNS/ICMP packets addressed to fake_ip on the NIC path.
        peer_mgr
            .add_nic_packet_process_pipeline(Box::new(data.clone()))
            .await;
        // System DNS configuration may block (files/registry), so run it off
        // the async runtime. The double `?` unwraps the JoinError and then
        // the configuration result.
        let data_clone = data.clone();
        tokio::task::spawn_blocking(move || data_clone.do_system_config(DEFAULT_ET_DNS_ZONE))
            .await
            .context("Failed to configure system")??;
        Ok(Self {
            rpc_server,
            data,
            peer_mgr,
            tun_inet,
        })
    }

    /// Best-effort teardown of the side effects of `new`: restores system
    /// DNS configuration, removes the /32 host route (when one was added),
    /// and detaches the NIC packet filter. Failures are logged or ignored —
    /// cleanup proceeds through every step regardless.
    pub async fn clean_env(&self) {
        if let Some(configer) = &self.data.system_config {
            let ret = configer.close();
            if let Err(e) = ret {
                tracing::error!("Failed to close system config: {:?}", e);
            }
        }
        // Mirror of the route-add condition in `new`.
        if !self.tun_inet.contains(&self.data.fake_ip) && self.data.tun_dev.is_some() {
            let ifcfg = IfConfiger {};
            let _ = ifcfg
                .remove_ipv4_route(&self.data.tun_dev.as_ref().unwrap(), self.data.fake_ip, 32)
                .await;
        }
        let _ = self
            .peer_mgr
            .remove_nic_packet_process_pipeline(NIC_PIPELINE_NAME.to_string())
            .await;
    }
}
impl Drop for MagicDnsServerInstance {
fn drop(&mut self) {
println!("MagicDnsServerInstance dropped");
}
}

View File

@@ -1,135 +0,0 @@
use std::{
collections::HashSet,
fs::{self, OpenOptions},
io::{self, Write},
os::unix::fs::PermissionsExt,
path::Path,
};
use super::{OSConfig, SystemConfig};
// Marker written as the first line of every resolver file we create, so the
// cleanup path only ever deletes files easytier itself wrote.
const MAC_RESOLVER_FILE_HEADER: &str = "# Added by easytier\n";
// macOS per-domain resolver directory (see resolver(5)).
const ETC_RESOLVER: &str = "/etc/resolver";
// NOTE(review): not referenced in the visible code of this file — confirm it
// is used elsewhere, or remove it.
const ETC_RESOLV_CONF: &str = "/etc/resolv.conf";
pub struct DarwinConfigurator {}
impl DarwinConfigurator {
    /// Creates a configurator; no filesystem access happens until
    /// `do_set_dns` / `do_close` are called.
    pub fn new() -> Self {
        DarwinConfigurator {}
    }

    /// Removes every resolver file previously written by easytier
    /// (identified by the header marker), leaving foreign files intact.
    pub fn do_close(&self) -> io::Result<()> {
        self.remove_resolver_files(|_| true)
    }

    /// macOS supports per-domain (split) DNS via /etc/resolver files.
    pub fn supports_split_dns(&self) -> bool {
        true
    }

    /// Writes `cfg` into /etc/resolver: a search-domain file plus one
    /// nameserver file per match domain, then removes any easytier-owned
    /// resolver files that are no longer wanted.
    pub fn do_set_dns(&self, cfg: &OSConfig) -> io::Result<()> {
        fs::create_dir_all(ETC_RESOLVER)?;
        // File names (relative to /etc/resolver) that must survive the
        // cleanup pass at the end.
        let mut keep = HashSet::new();
        // Write the search.easytier file holding the search-domain list.
        if !cfg.search_domains.is_empty() {
            let search_file = "search.easytier";
            keep.insert(search_file.to_string());
            let mut content = String::from(MAC_RESOLVER_FILE_HEADER);
            content.push_str("search");
            for domain in &cfg.search_domains {
                content.push(' ');
                // Resolver files list domains without the trailing dot.
                content.push_str(domain.trim_end_matches('.'));
            }
            content.push('\n');
            Self::write_resolver_file(search_file, &content)?;
        }
        // Write one resolver file per match domain, each listing every
        // nameserver.
        let mut ns_content = String::from(MAC_RESOLVER_FILE_HEADER);
        for ns in &cfg.nameservers {
            ns_content.push_str(&format!("nameserver {}\n", ns));
        }
        for domain in &cfg.match_domains {
            let file_base = domain.trim_end_matches('.');
            keep.insert(file_base.to_string());
            Self::write_resolver_file(file_base, &ns_content)?;
        }
        // Delete easytier-owned resolver files not in the keep set.
        self.remove_resolver_files(|domain| !keep.contains(domain))?;
        Ok(())
    }

    /// Creates or truncates `/etc/resolver/<file_name>` with mode 0644 and
    /// writes `content` into it.
    fn write_resolver_file(file_name: &str, content: &str) -> io::Result<()> {
        let path = Path::new(ETC_RESOLVER).join(file_name);
        let mut file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(&path)?;
        file.set_permissions(fs::Permissions::from_mode(0o644))?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }

    /// Deletes resolver files for which `should_delete(name)` returns true,
    /// but only those carrying the easytier header — files written by the
    /// user or other tools are never touched.
    fn remove_resolver_files<F>(&self, should_delete: F) -> io::Result<()>
    where
        F: Fn(&str) -> bool,
    {
        let entries = match fs::read_dir(ETC_RESOLVER) {
            Ok(e) => e,
            // A missing directory simply means there is nothing to clean up.
            Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(()),
            Err(e) => return Err(e),
        };
        for entry in entries {
            let entry = entry?;
            let file_type = entry.file_type()?;
            if !file_type.is_file() {
                continue;
            }
            let name = entry.file_name();
            let name_str = name.to_string_lossy();
            if !should_delete(&name_str) {
                continue;
            }
            let full_path = entry.path();
            // NOTE(review): read_to_string errors on non-UTF8 content and
            // aborts the whole cleanup pass — confirm that is acceptable.
            let content = fs::read_to_string(&full_path)?;
            // Only remove files we wrote ourselves.
            if !content.starts_with(MAC_RESOLVER_FILE_HEADER) {
                continue;
            }
            fs::remove_file(&full_path)?;
        }
        Ok(())
    }
}
impl SystemConfig for DarwinConfigurator {
    /// Applies `cfg` by (re)writing the /etc/resolver files.
    fn set_dns(&self, cfg: &OSConfig) -> io::Result<()> {
        self.do_set_dns(cfg)
    }

    /// Removes all easytier-written resolver files.
    fn close(&self) -> io::Result<()> {
        self.do_close()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): this test writes to the real /etc/resolver directory, so
    // it requires root and mutates live system DNS state. Consider marking
    // it #[ignore] or making the resolver directory configurable for tests.
    #[tokio::test]
    async fn set_dns_test() -> io::Result<()> {
        let config = OSConfig {
            nameservers: vec!["8.8.8.8".into()],
            search_domains: vec!["example.com".into()],
            match_domains: vec!["test.local".into()],
        };
        let configurator = DarwinConfigurator::new();
        // Apply and immediately remove the configuration to exercise both
        // code paths.
        configurator.set_dns(&config)?;
        configurator.close()?;
        Ok(())
    }
}

Some files were not shown because too many files have changed in this diff Show More