Compare commits
47 Commits
releases/v
...
v2.3.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d7c3179c6e | ||
|
|
b0fd37949a | ||
|
|
29994b663a | ||
|
|
fc397c35c5 | ||
|
|
0f2b214918 | ||
|
|
fec885c427 | ||
|
|
5a2fd4465c | ||
|
|
83d1ecc4da | ||
|
|
7c6daf7c56 | ||
|
|
28fe6257be | ||
|
|
99430983bc | ||
|
|
d758a4958f | ||
|
|
95b12dda5a | ||
|
|
2675cf2d00 | ||
|
|
72be46e8fa | ||
|
|
c5580feb64 | ||
|
|
7e3819be86 | ||
|
|
f0302f2be7 | ||
|
|
b5f60f843d | ||
|
|
6bdfb8b01f | ||
|
|
ef1d81a2a1 | ||
|
|
739b4ee106 | ||
|
|
6a038e8a88 | ||
|
|
72ea8a9f76 | ||
|
|
44d93648ee | ||
|
|
75f7865769 | ||
|
|
01e3ad99ca | ||
|
|
3c0d85c9db | ||
|
|
b38991a14e | ||
|
|
465269566b | ||
|
|
f103fc13d9 | ||
|
|
e5917fad4e | ||
|
|
de8c89eb03 | ||
|
|
c142db301a | ||
|
|
8dc8c7d9e2 | ||
|
|
2b909e04ea | ||
|
|
e130c3f2e4 | ||
|
|
3ad754879f | ||
|
|
fd2b3768e1 | ||
|
|
67cff12c76 | ||
|
|
c5ea7848b3 | ||
|
|
34365a096e | ||
|
|
d880dfbbca | ||
|
|
b46a200f8d | ||
|
|
81490d0662 | ||
|
|
3d1e841cc5 | ||
|
|
f52936a103 |
134
.github/workflows/core.yml
vendored
134
.github/workflows/core.yml
vendored
@@ -31,6 +31,47 @@ jobs:
|
||||
skip_after_successful_duplicate: 'true'
|
||||
cancel_others: 'true'
|
||||
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml", ".github/workflows/install_rust.sh"]'
|
||||
build_web:
|
||||
runs-on: ubuntu-latest
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 21
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v3
|
||||
with:
|
||||
version: 9
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: |
|
||||
pnpm -r install
|
||||
pnpm -r --filter "./easytier-web/*" build
|
||||
|
||||
- name: Archive artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: easytier-web-dashboard
|
||||
path: |
|
||||
easytier-web/frontend/dist/*
|
||||
build:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
@@ -71,10 +112,12 @@ jobs:
|
||||
- TARGET: x86_64-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
ARTIFACT_NAME: windows-x86_64
|
||||
|
||||
- TARGET: aarch64-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
ARTIFACT_NAME: windows-arm64
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
ARTIFACT_NAME: windows-i686
|
||||
|
||||
- TARGET: x86_64-unknown-freebsd
|
||||
OS: ubuntu-22.04
|
||||
@@ -87,7 +130,9 @@ jobs:
|
||||
TARGET: ${{ matrix.TARGET }}
|
||||
OS: ${{ matrix.OS }}
|
||||
OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }}
|
||||
needs: pre_job
|
||||
needs:
|
||||
- pre_job
|
||||
- build_web
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
@@ -96,6 +141,12 @@ jobs:
|
||||
run: |
|
||||
echo "GIT_DESC=$(git log -1 --format=%cd.%h --date=format:%Y-%m-%d_%H:%M:%S)" >> $GITHUB_ENV
|
||||
|
||||
- name: Download web artifact
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: easytier-web-dashboard
|
||||
path: easytier-web/frontend/dist/
|
||||
|
||||
- name: Cargo cache
|
||||
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
|
||||
uses: actions/cache@v4
|
||||
@@ -121,23 +172,27 @@ jobs:
|
||||
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
|
||||
cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier
|
||||
else
|
||||
if [[ $OS =~ ^windows.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
fi
|
||||
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
|
||||
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
|
||||
cargo build --release --verbose --target $TARGET
|
||||
fi
|
||||
|
||||
# Copied and slightly modified from @lmq8267 (https://github.com/lmq8267)
|
||||
- name: Build Core & Cli (X86_64 FreeBSD)
|
||||
uses: cross-platform-actions/action@v0.23.0
|
||||
uses: vmactions/freebsd-vm@v1
|
||||
if: ${{ endsWith(matrix.TARGET, 'freebsd') }}
|
||||
env:
|
||||
TARGET: ${{ matrix.TARGET }}
|
||||
with:
|
||||
operating_system: freebsd
|
||||
environment_variables: TARGET
|
||||
architecture: x86-64
|
||||
version: ${{ matrix.BSD_VERSION }}
|
||||
shell: bash
|
||||
memory: 5G
|
||||
cpu_count: 4
|
||||
envs: TARGET
|
||||
release: ${{ matrix.BSD_VERSION }}
|
||||
arch: x86_64
|
||||
usesh: true
|
||||
mem: 6144
|
||||
cpu: 4
|
||||
run: |
|
||||
uname -a
|
||||
echo $SHELL
|
||||
@@ -146,19 +201,21 @@ jobs:
|
||||
whoami
|
||||
env | sort
|
||||
|
||||
sudo pkg install -y git protobuf llvm-devel
|
||||
pkg install -y git protobuf llvm-devel sudo curl
|
||||
curl --proto 'https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
source $HOME/.cargo/env
|
||||
. $HOME/.cargo/env
|
||||
|
||||
rustup set auto-self-update disable
|
||||
|
||||
rustup install 1.84
|
||||
rustup default 1.84
|
||||
rustup install 1.86
|
||||
rustup default 1.86
|
||||
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
export CARGO_TERM_COLOR=always
|
||||
|
||||
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
|
||||
mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed
|
||||
cargo build --release --verbose --target $TARGET
|
||||
|
||||
- name: Install UPX
|
||||
@@ -174,12 +231,13 @@ jobs:
|
||||
# windows is the only OS using a different convention for executable file name
|
||||
if [[ $OS =~ ^windows.*$ && $TARGET =~ ^x86_64.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
cp easytier/third_party/Packet.dll ./artifacts/objects/
|
||||
cp easytier/third_party/wintun.dll ./artifacts/objects/
|
||||
cp easytier/third_party/*.dll ./artifacts/objects/
|
||||
elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^i686.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
cp easytier/third_party/i686/*.dll ./artifacts/objects/
|
||||
elif [[ $OS =~ ^windows.*$ && $TARGET =~ ^aarch64.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
cp easytier/third_party/arm64/Packet.dll ./artifacts/objects/
|
||||
cp easytier/third_party/arm64/wintun.dll ./artifacts/objects/
|
||||
cp easytier/third_party/arm64/*.dll ./artifacts/objects/
|
||||
fi
|
||||
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
|
||||
TAG=$GITHUB_REF_NAME
|
||||
@@ -196,6 +254,7 @@ jobs:
|
||||
mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/
|
||||
if [[ ! $TARGET =~ ^mips.*$ ]]; then
|
||||
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./artifacts/objects/
|
||||
mv ./target/$TARGET/release/easytier-web-embed"$SUFFIX" ./artifacts/objects/
|
||||
fi
|
||||
|
||||
mv ./artifacts/objects/* ./artifacts/
|
||||
@@ -213,8 +272,47 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- pre_job
|
||||
- build_web
|
||||
- build
|
||||
steps:
|
||||
- name: Mark result as failed
|
||||
if: needs.build.result != 'success'
|
||||
run: exit 1
|
||||
|
||||
magisk_build:
|
||||
needs:
|
||||
- pre_job
|
||||
- build_web
|
||||
- build
|
||||
if: needs.pre_job.outputs.should_skip != 'true' && always()
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v4 # 必须先检出代码才能获取模块配置
|
||||
|
||||
# 下载二进制文件到独立目录
|
||||
- name: Download Linux aarch64 binaries
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: easytier-linux-aarch64
|
||||
path: ./downloaded-binaries/ # 独立目录避免冲突
|
||||
|
||||
# 将二进制文件复制到 Magisk 模块目录
|
||||
- name: Prepare binaries
|
||||
run: |
|
||||
mkdir -p ./easytier-contrib/easytier-magisk/
|
||||
cp ./downloaded-binaries/easytier-core ./easytier-contrib/easytier-magisk/
|
||||
cp ./downloaded-binaries/easytier-cli ./easytier-contrib/easytier-magisk/
|
||||
cp ./downloaded-binaries/easytier-web ./easytier-contrib/easytier-magisk/
|
||||
|
||||
|
||||
# 上传生成的模块
|
||||
- name: Upload Magisk Module
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: Easytier-Magisk
|
||||
path: |
|
||||
./easytier-contrib/easytier-magisk
|
||||
!./easytier-contrib/easytier-magisk/build.sh
|
||||
!./easytier-contrib/easytier-magisk/magisk_update.json
|
||||
if-no-files-found: error
|
||||
2
.github/workflows/docker.yml
vendored
2
.github/workflows/docker.yml
vendored
@@ -11,7 +11,7 @@ on:
|
||||
image_tag:
|
||||
description: 'Tag for this image build'
|
||||
type: string
|
||||
default: 'v2.2.2'
|
||||
default: 'v2.3.0'
|
||||
required: true
|
||||
mark_latest:
|
||||
description: 'Mark this image as latest'
|
||||
|
||||
105
.github/workflows/gui.yml
vendored
105
.github/workflows/gui.yml
vendored
@@ -63,6 +63,11 @@ jobs:
|
||||
GUI_TARGET: aarch64-pc-windows-msvc
|
||||
ARTIFACT_NAME: windows-arm64
|
||||
|
||||
- TARGET: i686-pc-windows-msvc
|
||||
OS: windows-latest
|
||||
GUI_TARGET: i686-pc-windows-msvc
|
||||
ARTIFACT_NAME: windows-i686
|
||||
|
||||
runs-on: ${{ matrix.OS }}
|
||||
env:
|
||||
NAME: easytier
|
||||
@@ -73,6 +78,56 @@ jobs:
|
||||
needs: pre_job
|
||||
if: needs.pre_job.outputs.should_skip != 'true'
|
||||
steps:
|
||||
- name: Install GUI dependencies (x86 only)
|
||||
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -qq libwebkit2gtk-4.1-dev \
|
||||
build-essential \
|
||||
curl \
|
||||
wget \
|
||||
file \
|
||||
libgtk-3-dev \
|
||||
librsvg2-dev \
|
||||
libxdo-dev \
|
||||
libssl-dev \
|
||||
patchelf
|
||||
|
||||
- name: Install GUI cross compile (aarch64 only)
|
||||
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
|
||||
run: |
|
||||
# see https://tauri.app/v1/guides/building/linux/
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
sudo dpkg --add-architecture arm64
|
||||
sudo apt update
|
||||
sudo apt install aptitude
|
||||
sudo aptitude install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64 \
|
||||
libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64 \
|
||||
libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu
|
||||
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
|
||||
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Set current ref as env variable
|
||||
@@ -124,59 +179,13 @@ jobs:
|
||||
# GitHub repo token to use to avoid rate limiter
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install GUI dependencies (x86 only)
|
||||
if: ${{ matrix.TARGET == 'x86_64-unknown-linux-musl' }}
|
||||
run: |
|
||||
sudo apt install -qq libwebkit2gtk-4.1-dev \
|
||||
build-essential \
|
||||
curl \
|
||||
wget \
|
||||
file \
|
||||
libgtk-3-dev \
|
||||
librsvg2-dev \
|
||||
libxdo-dev \
|
||||
libssl-dev \
|
||||
patchelf
|
||||
|
||||
- name: Install GUI cross compile (aarch64 only)
|
||||
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
|
||||
run: |
|
||||
# see https://tauri.app/v1/guides/building/linux/
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
sudo dpkg --add-architecture arm64
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y libgstreamer1.0-0:arm64 gstreamer1.0-plugins-base:arm64 gstreamer1.0-plugins-good:arm64
|
||||
sudo apt-get install -y libgstreamer-gl1.0-0:arm64 libgstreamer-plugins-base1.0-0:arm64 libgstreamer-plugins-good1.0-0:arm64 libwebkit2gtk-4.1-0:arm64
|
||||
sudo apt install -f -o Dpkg::Options::="--force-overwrite" libwebkit2gtk-4.1-dev:arm64 libssl-dev:arm64 gcc-aarch64-linux-gnu
|
||||
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
|
||||
echo "PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig/" >> "$GITHUB_ENV"
|
||||
|
||||
- name: copy correct DLLs
|
||||
if: ${{ matrix.OS == 'windows-latest' }}
|
||||
run: |
|
||||
if [[ $GUI_TARGET =~ ^aarch64.*$ ]]; then
|
||||
cp ./easytier/third_party/arm64/*.dll ./easytier-gui/src-tauri/
|
||||
elif [[ $GUI_TARGET =~ ^i686.*$ ]]; then
|
||||
cp ./easytier/third_party/i686/*.dll ./easytier-gui/src-tauri/
|
||||
else
|
||||
cp ./easytier/third_party/*.dll ./easytier-gui/src-tauri/
|
||||
fi
|
||||
|
||||
6
.github/workflows/install_rust.sh
vendored
6
.github/workflows/install_rust.sh
vendored
@@ -36,7 +36,7 @@ if [[ $OS =~ ^ubuntu.*$ ]]; then
|
||||
|
||||
if [ -n "$MUSL_URI" ]; then
|
||||
mkdir -p ./musl_gcc
|
||||
wget -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/
|
||||
wget --inet4-only -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/
|
||||
tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/
|
||||
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/
|
||||
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/${MUSL_URI}/include/ /usr/include/musl-cross
|
||||
@@ -45,8 +45,8 @@ fi
|
||||
|
||||
# see https://github.com/rust-lang/rustup/issues/3709
|
||||
rustup set auto-self-update disable
|
||||
rustup install 1.84
|
||||
rustup default 1.84
|
||||
rustup install 1.86
|
||||
rustup default 1.86
|
||||
|
||||
# mips/mipsel cannot add target from rustup, need compile by ourselves
|
||||
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
|
||||
|
||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -21,7 +21,7 @@ on:
|
||||
version:
|
||||
description: 'Version for this release'
|
||||
type: string
|
||||
default: 'v2.2.2'
|
||||
default: 'v2.3.0'
|
||||
required: true
|
||||
make_latest:
|
||||
description: 'Mark this release as latest'
|
||||
|
||||
29
.github/workflows/test.yml
vendored
29
.github/workflows/test.yml
vendored
@@ -47,11 +47,40 @@ jobs:
|
||||
|
||||
- name: Setup system for test
|
||||
run: |
|
||||
sudo modprobe br_netfilter
|
||||
sudo sysctl net.bridge.bridge-nf-call-iptables=0
|
||||
sudo sysctl net.bridge.bridge-nf-call-ip6tables=0
|
||||
sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
|
||||
sudo ip addr add 2001:db8::2/64 dev lo
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 21
|
||||
|
||||
- name: Install pnpm
|
||||
uses: pnpm/action-setup@v3
|
||||
with:
|
||||
version: 9
|
||||
run_install: false
|
||||
|
||||
- name: Get pnpm store directory
|
||||
shell: bash
|
||||
run: |
|
||||
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
|
||||
|
||||
- name: Setup pnpm cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ${{ env.STORE_PATH }}
|
||||
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-pnpm-store-
|
||||
|
||||
- name: Install frontend dependencies
|
||||
run: |
|
||||
pnpm -r install
|
||||
pnpm -r --filter "./easytier-web/*" build
|
||||
|
||||
- name: Cargo cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
|
||||
724
Cargo.lock
generated
724
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
10
Cargo.toml
10
Cargo.toml
@@ -1,6 +1,12 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = ["easytier", "easytier-gui/src-tauri", "easytier-rpc-build", "easytier-web"]
|
||||
members = [
|
||||
"easytier",
|
||||
"easytier-gui/src-tauri",
|
||||
"easytier-rpc-build",
|
||||
"easytier-web",
|
||||
"easytier-contrib/easytier-ffi",
|
||||
]
|
||||
default-members = ["easytier", "easytier-web"]
|
||||
|
||||
[profile.dev]
|
||||
@@ -10,3 +16,5 @@ panic = "unwind"
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 'z'
|
||||
strip = true
|
||||
|
||||
34
README.md
34
README.md
@@ -1,10 +1,13 @@
|
||||
# EasyTier
|
||||
|
||||
[](https://github.com/EasyTier/EasyTier/releases)
|
||||
[](https://github.com/EasyTier/EasyTier/blob/main/LICENSE)
|
||||
[](https://github.com/EasyTier/EasyTier/commits/main)
|
||||
[](https://github.com/EasyTier/EasyTier/issues)
|
||||
[](https://github.com/EasyTier/EasyTier/actions/workflows/core.yml)
|
||||
[](https://github.com/EasyTier/EasyTier/actions/workflows/gui.yml)
|
||||
[](https://github.com/EasyTier/EasyTier/actions/workflows/test.yml)
|
||||
[](https://deepwiki.com/EasyTier/EasyTier)
|
||||
|
||||
[简体中文](/README_CN.md) | [English](/README.md)
|
||||
|
||||
@@ -61,7 +64,36 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented
|
||||
wget -O /tmp/easytier.sh "https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh" && bash /tmp/easytier.sh install
|
||||
```
|
||||
|
||||
You can also uninstall/update Easytier by the command "uninstall" or "update" of this script
|
||||
The script supports the following commands and options:
|
||||
|
||||
Commands:
|
||||
- `install`: Install EasyTier
|
||||
- `uninstall`: Uninstall EasyTier
|
||||
- `update`: Update EasyTier to the latest version
|
||||
- `help`: Show help message
|
||||
|
||||
Options:
|
||||
- `--skip-folder-verify`: Skip folder verification during installation
|
||||
- `--skip-folder-fix`: Skip automatic folder path fixing
|
||||
- `--no-gh-proxy`: Disable GitHub proxy
|
||||
- `--gh-proxy`: Set custom GitHub proxy URL (default: https://ghfast.top/)
|
||||
|
||||
Examples:
|
||||
```sh
|
||||
# Show help
|
||||
bash /tmp/easytier.sh help
|
||||
|
||||
# Install with options
|
||||
bash /tmp/easytier.sh install --skip-folder-verify
|
||||
bash /tmp/easytier.sh install --no-gh-proxy
|
||||
bash /tmp/easytier.sh install --gh-proxy https://your-proxy.com/
|
||||
|
||||
# Update EasyTier
|
||||
bash /tmp/easytier.sh update
|
||||
|
||||
# Uninstall EasyTier
|
||||
bash /tmp/easytier.sh uninstall
|
||||
```
|
||||
|
||||
6. **Install by Homebrew (For MacOS Only)**
|
||||
|
||||
|
||||
31
README_CN.md
31
README_CN.md
@@ -61,7 +61,36 @@
|
||||
wget -O /tmp/easytier.sh "https://raw.githubusercontent.com/EasyTier/EasyTier/main/script/install.sh" && bash /tmp/easytier.sh install
|
||||
```
|
||||
|
||||
使用本脚本安装的 Easytier 可以使用脚本的 uninstall/update 对其卸载/升级
|
||||
脚本支持以下命令和选项:
|
||||
|
||||
命令:
|
||||
- `install`: 安装 EasyTier
|
||||
- `uninstall`: 卸载 EasyTier
|
||||
- `update`: 更新 EasyTier 到最新版本
|
||||
- `help`: 显示帮助信息
|
||||
|
||||
选项:
|
||||
- `--skip-folder-verify`: 跳过安装过程中的文件夹验证
|
||||
- `--skip-folder-fix`: 跳过自动修复文件夹路径
|
||||
- `--no-gh-proxy`: 禁用 GitHub 代理
|
||||
- `--gh-proxy`: 设置自定义 GitHub 代理 URL (默认值: https://ghfast.top/)
|
||||
|
||||
示例:
|
||||
```sh
|
||||
# 查看帮助
|
||||
bash /tmp/easytier.sh help
|
||||
|
||||
# 安装(带选项)
|
||||
bash /tmp/easytier.sh install --skip-folder-verify
|
||||
bash /tmp/easytier.sh install --no-gh-proxy
|
||||
bash /tmp/easytier.sh install --gh-proxy https://your-proxy.com/
|
||||
|
||||
# 更新 EasyTier
|
||||
bash /tmp/easytier.sh update
|
||||
|
||||
# 卸载 EasyTier
|
||||
bash /tmp/easytier.sh uninstall
|
||||
```
|
||||
|
||||
6. **使用 Homebrew 安装 (仅适用于 MacOS)**
|
||||
|
||||
|
||||
16
easytier-contrib/easytier-ffi/Cargo.toml
Normal file
16
easytier-contrib/easytier-ffi/Cargo.toml
Normal file
@@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "easytier-ffi"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[dependencies]
|
||||
easytier = { path = "../../easytier" }
|
||||
|
||||
once_cell = "1.18.0"
|
||||
dashmap = "6.0"
|
||||
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
159
easytier-contrib/easytier-ffi/examples/csharp.cs
Normal file
159
easytier-contrib/easytier-ffi/examples/csharp.cs
Normal file
@@ -0,0 +1,159 @@
|
||||
public class EasyTierFFI
|
||||
{
|
||||
// 导入 DLL 函数
|
||||
private const string DllName = "easytier_ffi.dll";
|
||||
|
||||
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
|
||||
private static extern int parse_config([MarshalAs(UnmanagedType.LPStr)] string cfgStr);
|
||||
|
||||
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
|
||||
private static extern int run_network_instance([MarshalAs(UnmanagedType.LPStr)] string cfgStr);
|
||||
|
||||
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
|
||||
private static extern int retain_network_instance(IntPtr instNames, int length);
|
||||
|
||||
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
|
||||
private static extern int collect_network_infos(IntPtr infos, int maxLength);
|
||||
|
||||
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
|
||||
private static extern void get_error_msg(out IntPtr errorMsg);
|
||||
|
||||
[DllImport(DllName, CallingConvention = CallingConvention.Cdecl)]
|
||||
private static extern void free_string(IntPtr str);
|
||||
|
||||
// 定义 KeyValuePair 结构体
|
||||
[StructLayout(LayoutKind.Sequential)]
|
||||
public struct KeyValuePair
|
||||
{
|
||||
public IntPtr Key;
|
||||
public IntPtr Value;
|
||||
}
|
||||
|
||||
// 解析配置
|
||||
public static void ParseConfig(string config)
|
||||
{
|
||||
if (string.IsNullOrEmpty(config))
|
||||
{
|
||||
throw new ArgumentException("Configuration string cannot be null or empty.");
|
||||
}
|
||||
|
||||
int result = parse_config(config);
|
||||
if (result < 0)
|
||||
{
|
||||
throw new Exception(GetErrorMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// 启动网络实例
|
||||
public static void RunNetworkInstance(string config)
|
||||
{
|
||||
if (string.IsNullOrEmpty(config))
|
||||
{
|
||||
throw new ArgumentException("Configuration string cannot be null or empty.");
|
||||
}
|
||||
|
||||
int result = run_network_instance(config);
|
||||
if (result < 0)
|
||||
{
|
||||
throw new Exception(GetErrorMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// 保留网络实例
|
||||
public static void RetainNetworkInstances(string[] instanceNames)
|
||||
{
|
||||
IntPtr[] namePointers = null;
|
||||
IntPtr namesPtr = IntPtr.Zero;
|
||||
|
||||
try
|
||||
{
|
||||
if (instanceNames != null && instanceNames.Length > 0)
|
||||
{
|
||||
namePointers = new IntPtr[instanceNames.Length];
|
||||
for (int i = 0; i < instanceNames.Length; i++)
|
||||
{
|
||||
if (string.IsNullOrEmpty(instanceNames[i]))
|
||||
{
|
||||
throw new ArgumentException("Instance name cannot be null or empty.");
|
||||
}
|
||||
namePointers[i] = Marshal.StringToHGlobalAnsi(instanceNames[i]);
|
||||
}
|
||||
|
||||
namesPtr = Marshal.AllocHGlobal(Marshal.SizeOf<IntPtr>() * namePointers.Length);
|
||||
Marshal.Copy(namePointers, 0, namesPtr, namePointers.Length);
|
||||
}
|
||||
|
||||
int result = retain_network_instance(namesPtr, instanceNames?.Length ?? 0);
|
||||
if (result < 0)
|
||||
{
|
||||
throw new Exception(GetErrorMessage());
|
||||
}
|
||||
}
|
||||
finally
|
||||
{
|
||||
if (namePointers != null)
|
||||
{
|
||||
foreach (var ptr in namePointers)
|
||||
{
|
||||
if (ptr != IntPtr.Zero)
|
||||
{
|
||||
Marshal.FreeHGlobal(ptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (namesPtr != IntPtr.Zero)
|
||||
{
|
||||
Marshal.FreeHGlobal(namesPtr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 收集网络信息
|
||||
public static KeyValuePair<string, string>[] CollectNetworkInfos(int maxLength)
|
||||
{
|
||||
IntPtr buffer = Marshal.AllocHGlobal(Marshal.SizeOf<KeyValuePair>() * maxLength);
|
||||
try
|
||||
{
|
||||
int count = collect_network_infos(buffer, maxLength);
|
||||
if (count < 0)
|
||||
{
|
||||
throw new Exception(GetErrorMessage());
|
||||
}
|
||||
|
||||
var result = new KeyValuePair<string, string>[count];
|
||||
for (int i = 0; i < count; i++)
|
||||
{
|
||||
var kv = Marshal.PtrToStructure<KeyValuePair>(buffer + i * Marshal.SizeOf<KeyValuePair>());
|
||||
string key = Marshal.PtrToStringAnsi(kv.Key);
|
||||
string value = Marshal.PtrToStringAnsi(kv.Value);
|
||||
|
||||
// 释放由 FFI 分配的字符串内存
|
||||
free_string(kv.Key);
|
||||
free_string(kv.Value);
|
||||
|
||||
result[i] = new KeyValuePair<string, string>(key, value);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
finally
|
||||
{
|
||||
Marshal.FreeHGlobal(buffer);
|
||||
}
|
||||
}
|
||||
|
||||
// 获取错误信息
|
||||
private static string GetErrorMessage()
|
||||
{
|
||||
get_error_msg(out IntPtr errorMsgPtr);
|
||||
if (errorMsgPtr == IntPtr.Zero)
|
||||
{
|
||||
return "Unknown error";
|
||||
}
|
||||
|
||||
string errorMsg = Marshal.PtrToStringAnsi(errorMsgPtr);
|
||||
free_string(errorMsgPtr); // 释放错误信息字符串
|
||||
return errorMsg;
|
||||
}
|
||||
}
|
||||
199
easytier-contrib/easytier-ffi/src/lib.rs
Normal file
199
easytier-contrib/easytier-ffi/src/lib.rs
Normal file
@@ -0,0 +1,199 @@
|
||||
use std::sync::Mutex;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use easytier::{
|
||||
common::config::{ConfigLoader as _, TomlConfigLoader},
|
||||
launcher::NetworkInstance,
|
||||
};
|
||||
|
||||
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> =
|
||||
once_cell::sync::Lazy::new(DashMap::new);
|
||||
|
||||
static ERROR_MSG: once_cell::sync::Lazy<Mutex<Vec<u8>>> =
|
||||
once_cell::sync::Lazy::new(|| Mutex::new(Vec::new()));
|
||||
|
||||
#[repr(C)]
|
||||
pub struct KeyValuePair {
|
||||
pub key: *const std::ffi::c_char,
|
||||
pub value: *const std::ffi::c_char,
|
||||
}
|
||||
|
||||
fn set_error_msg(msg: &str) {
|
||||
let bytes = msg.as_bytes();
|
||||
let mut msg_buf = ERROR_MSG.lock().unwrap();
|
||||
let len = bytes.len();
|
||||
msg_buf.resize(len, 0);
|
||||
msg_buf[..len].copy_from_slice(bytes);
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn get_error_msg(out: *mut *const std::ffi::c_char) {
|
||||
let msg_buf = ERROR_MSG.lock().unwrap();
|
||||
if msg_buf.is_empty() {
|
||||
unsafe {
|
||||
*out = std::ptr::null();
|
||||
}
|
||||
return;
|
||||
}
|
||||
let cstr = std::ffi::CString::new(&msg_buf[..]).unwrap();
|
||||
unsafe {
|
||||
*out = cstr.into_raw();
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn free_string(s: *const std::ffi::c_char) {
|
||||
if s.is_null() {
|
||||
return;
|
||||
}
|
||||
unsafe {
|
||||
let _ = std::ffi::CString::from_raw(s as *mut std::ffi::c_char);
|
||||
}
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn parse_config(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
|
||||
let cfg_str = unsafe {
|
||||
assert!(!cfg_str.is_null());
|
||||
std::ffi::CStr::from_ptr(cfg_str)
|
||||
.to_string_lossy()
|
||||
.into_owned()
|
||||
};
|
||||
|
||||
if let Err(e) = TomlConfigLoader::new_from_str(&cfg_str) {
|
||||
set_error_msg(&format!("failed to parse config: {:?}", e));
|
||||
return -1;
|
||||
}
|
||||
|
||||
0
|
||||
}
|
||||
|
||||
/// FFI entry: parse `cfg_str` (TOML) and start a new network instance.
///
/// Returns 0 on success, -1 on failure (parse error, duplicate instance
/// name, or start failure — details via `get_error_msg`). On success the
/// instance is registered in `INSTANCE_MAP` under its configured name.
#[no_mangle]
pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std::ffi::c_int {
    // SAFETY: caller guarantees `cfg_str` is a valid NUL-terminated C string.
    let cfg_str = unsafe {
        assert!(!cfg_str.is_null());
        std::ffi::CStr::from_ptr(cfg_str)
            .to_string_lossy()
            .into_owned()
    };
    let cfg = match TomlConfigLoader::new_from_str(&cfg_str) {
        Ok(cfg) => cfg,
        Err(e) => {
            set_error_msg(&format!("failed to parse config: {}", e));
            return -1;
        }
    };

    let inst_name = cfg.get_inst_name();

    // NOTE(review): check-then-insert is racy if this function can be called
    // concurrently with the same name — two callers could both pass this
    // check, and the later `insert` below would silently replace (and drop)
    // the first instance. Confirm single-threaded use, or switch to
    // DashMap's entry API.
    if INSTANCE_MAP.contains_key(&inst_name) {
        set_error_msg("instance already exists");
        return -1;
    }

    let mut instance = NetworkInstance::new(cfg);
    if let Err(e) = instance.start().map_err(|e| e.to_string()) {
        set_error_msg(&format!("failed to start instance: {}", e));
        return -1;
    }

    INSTANCE_MAP.insert(inst_name, instance);

    0
}
|
||||
|
||||
#[no_mangle]
|
||||
pub extern "C" fn retain_network_instance(
|
||||
inst_names: *const *const std::ffi::c_char,
|
||||
length: usize,
|
||||
) -> std::ffi::c_int {
|
||||
if length == 0 {
|
||||
INSTANCE_MAP.clear();
|
||||
return 0;
|
||||
}
|
||||
|
||||
let inst_names = unsafe {
|
||||
assert!(!inst_names.is_null());
|
||||
std::slice::from_raw_parts(inst_names, length)
|
||||
.iter()
|
||||
.map(|&name| {
|
||||
assert!(!name.is_null());
|
||||
std::ffi::CStr::from_ptr(name)
|
||||
.to_string_lossy()
|
||||
.into_owned()
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let _ = INSTANCE_MAP.retain(|k, _| inst_names.contains(k));
|
||||
|
||||
0
|
||||
}
|
||||
|
||||
/// FFI entry: fill `infos` (an array of `max_length` slots) with one
/// `KeyValuePair` per instance that has running info: key = instance name,
/// value = its running info serialized as JSON.
///
/// Returns the number of slots written, or -1 on serialization failure.
/// Each written `key`/`value` pointer is allocated here and ownership
/// passes to the caller, who must release both with `free_string`.
#[no_mangle]
pub extern "C" fn collect_network_infos(
    infos: *mut KeyValuePair,
    max_length: usize,
) -> std::ffi::c_int {
    if max_length == 0 {
        return 0;
    }

    // SAFETY: caller guarantees `infos` points to at least `max_length`
    // writable `KeyValuePair` slots.
    let infos = unsafe {
        assert!(!infos.is_null());
        std::slice::from_raw_parts_mut(infos, max_length)
    };

    let mut index = 0;
    for instance in INSTANCE_MAP.iter() {
        if index >= max_length {
            break;
        }
        let key = instance.key();
        // Instances without running info are skipped without consuming a slot.
        let Some(value) = instance.get_running_info() else {
            continue;
        };
        // convert value to json string
        let value = match serde_json::to_string(&value) {
            Ok(value) => value,
            Err(e) => {
                // NOTE(review): pairs already written into `infos` are not
                // released on this early return, and the caller cannot tell
                // how many slots were filled when it sees -1. Confirm
                // callers free nothing on -1 — otherwise those strings leak.
                set_error_msg(&format!("failed to serialize instance info: {}", e));
                return -1;
            }
        };

        infos[index] = KeyValuePair {
            key: std::ffi::CString::new(key.clone()).unwrap().into_raw(),
            value: std::ffi::CString::new(value).unwrap().into_raw(),
        };
        index += 1;
    }

    index as std::ffi::c_int
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): the config below contains the bare word `fdsafdsa`,
    // which is not valid TOML, yet the test expects `parse_config` to
    // succeed (0). Confirm whether TomlConfigLoader tolerates such input;
    // otherwise the expected value should be -1.
    #[test]
    fn test_parse_config() {
        let cfg_str = r#"
inst_name = "test"
network = "test_network"
fdsafdsa
"#;
        let cstr = std::ffi::CString::new(cfg_str).unwrap();
        assert_eq!(parse_config(cstr.as_ptr()), 0);
    }

    // Starts a real instance named "test" and expects success; the instance
    // stays registered in INSTANCE_MAP for the rest of the test process.
    #[test]
    fn test_run_network_instance() {
        let cfg_str = r#"
inst_name = "test"
network = "test_network"
"#;
        let cstr = std::ffi::CString::new(cfg_str).unwrap();
        assert_eq!(run_network_instance(cstr.as_ptr()), 0);
    }
}
|
||||
@@ -0,0 +1,33 @@
|
||||
#!/sbin/sh
# Magisk flashable-zip installer stub: validates the Magisk version, loads
# Magisk's util_functions.sh, and delegates the install to `install_module`.

#################
# Initialization
#################

umask 022

# echo before loading util_functions
ui_print() { echo "$1"; }

# Abort the install when the installed Magisk is older than v20.4
# (version code 20400), which is required for `install_module`.
require_new_magisk() {
  ui_print "********************************"
  ui_print " Please install Magisk v20.4+! "
  ui_print "********************************"
  exit 1
}

#########################
# Load util_functions.sh
#########################

# Recovery passes: $2 = output fd, $3 = path to the module zip.
OUTFD=$2
ZIPFILE=$3

mount /data 2>/dev/null

[ -f /data/adb/magisk/util_functions.sh ] || require_new_magisk
. /data/adb/magisk/util_functions.sh
# MAGISK_VER_CODE is set by util_functions.sh sourced above.
[ $MAGISK_VER_CODE -lt 20400 ] && require_new_magisk

install_module
exit 0
|
||||
@@ -0,0 +1 @@
|
||||
#MAGISK
|
||||
6
easytier-contrib/easytier-magisk/README.md
Normal file
6
easytier-contrib/easytier-magisk/README.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# easytier_magisk版模块
|
||||
magisk安装后重启
|
||||
|
||||
目录位置:/data/adb/modules/easytier_magisk
|
||||
配置文件位置:/data/adb/modules/easytier_magisk/config/config.conf
|
||||
修改 config.conf 即可;修改配置文件后,去 Magisk App 重新开关模块即可生效
|
||||
16
easytier-contrib/easytier-magisk/action.sh
Normal file
16
easytier-contrib/easytier-magisk/action.sh
Normal file
@@ -0,0 +1,16 @@
|
||||
#!/data/adb/magisk/busybox sh
# Magisk "action" button handler: stop any running easytier-core from this
# module, then restart the service via service.sh.
MODDIR=${0%/*}
echo 'Easytier 服务停止中....'

# PIDs of easytier-core processes started with this module's config
PIDS=$(pgrep -f "^${MODDIR}/easytier-core -c ${MODDIR}/config/config.conf")

if [ -n "$PIDS" ]; then
    kill $PIDS # kill every matching process
    echo "已停止所有 Easytier 进程 (PIDs: $PIDS)"
else
    echo "Easytier 服务未运行"
fi
echo '重启服务中...'
# Relaunch the long-running service loop, detached, logging to start.log
nohup sh ${MODDIR}/service.sh >> ${MODDIR}/log/start.log 2>&1 &
echo '服务已重启'
exit
|
||||
25
easytier-contrib/easytier-magisk/build.sh
Normal file
25
easytier-contrib/easytier-magisk/build.sh
Normal file
@@ -0,0 +1,25 @@
|
||||
#!/bin/sh
# Package the EasyTier Magisk module into easytier_magisk_<version>.zip.

# Derive the version from the workspace Cargo.toml. (The original script
# first read module.prop into $version, but that value was unconditionally
# overwritten by this line — dead code, removed.)
version='v'$(grep '^version =' ../../easytier/Cargo.toml | cut -d '"' -f 2)

if [ -z "$version" ]; then
    echo "Error: 版本号不存在."
    exit 1
fi

filename="easytier_magisk_${version}.zip"
echo $version

# If prebuilt binaries are already present, zip the module directly;
# otherwise download the matching linux-aarch64 release build first.
if [ -f "./easytier-core" ] && [ -f "./easytier-cli" ] && [ -f "./easytier-web" ]; then
    zip -r -o -X "$filename" ./ -x '.git/*' -x '.github/*' -x 'folder/*' -x 'build.sh' -x 'magisk_update.json'
else
    wget -O "easytier_last.zip" https://github.com/EasyTier/EasyTier/releases/download/"$version"/easytier-linux-aarch64-"$version".zip
    unzip -o easytier_last.zip -d ./
    mv ./easytier-linux-aarch64/* ./
    rm -rf ./easytier_last.zip
    rm -rf ./easytier-linux-aarch64
    zip -r -o -X "$filename" ./ -x '.git/*' -x '.github/*' -x 'folder/*' -x 'build.sh' -x 'magisk_update.json'
fi
|
||||
37
easytier-contrib/easytier-magisk/config/config.conf
Normal file
37
easytier-contrib/easytier-magisk/config/config.conf
Normal file
@@ -0,0 +1,37 @@
|
||||
# EasyTier node configuration used by the Magisk module's service.sh.
instance_name = "default"
dhcp = false
# ipv4 = "<this node's virtual ip>"  # uncomment to assign a static address
listeners = [
    "tcp://0.0.0.0:11010",
    "udp://0.0.0.0:11010",
    "wg://0.0.0.0:11011",
    "ws://0.0.0.0:11011/",
    "wss://0.0.0.0:11012/",
]
mapped_listeners = []
exit_nodes = []
rpc_portal = "0.0.0.0:15888"

[network_identity]
network_name = "default"
network_secret = ""

[[peer]]
# uri = "<protocol>://<relay ip>:<port>"  # uncomment to add a relay peer

[flags]
default_protocol = "tcp"
dev_name = ""
enable_encryption = true
enable_ipv6 = true
mtu = 1380
latency_first = false
enable_exit_node = false
no_tun = false
use_smoltcp = false
foreign_network_whitelist = "*"
disable_p2p = false
relay_all_peer_rpc = false
disable_udp_hole_punching = false
7
easytier-contrib/easytier-magisk/customize.sh
Normal file
7
easytier-contrib/easytier-magisk/customize.sh
Normal file
@@ -0,0 +1,7 @@
|
||||
# Magisk customize.sh: runs at module install time to print install info.
ui_print '安装完成'
# Fix: `+` is not a concatenation operator in sh. The original
#   ui_print '当前架构为' + $ARCH
# passed "+" and the variable as extra arguments, which ui_print ignores,
# so the architecture/API values were never shown. Interpolate instead.
ui_print "当前架构为 $ARCH"
ui_print "当前系统版本为 $API"
ui_print '安装目录为: /data/adb/modules/easytier_magisk'
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.conf'
ui_print '修改后配置文件后在magisk app点击操作按钮即可生效'
ui_print '记得重启'
|
||||
6
easytier-contrib/easytier-magisk/magisk_update.json
Normal file
6
easytier-contrib/easytier-magisk/magisk_update.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"version": "v1.0",
|
||||
"versionCode": 1,
|
||||
"zipUrl": "",
|
||||
"changelog": ""
|
||||
}
|
||||
7
easytier-contrib/easytier-magisk/module.prop
Normal file
7
easytier-contrib/easytier-magisk/module.prop
Normal file
@@ -0,0 +1,7 @@
|
||||
id=easytier_magisk
|
||||
name=easytier_magisk版
|
||||
version=v2.2.4
|
||||
versionCode=1
|
||||
author=EasyTier
|
||||
description=easytier_magisk版模块 作者:EasyTier https://github.com/EasyTier/EasyTier
|
||||
updateJson=https://raw.githubusercontent.com/EasyTier/EasyTier/refs/heads/main/easytier-contrib/easytier-magisk/magisk_update.json
|
||||
20
easytier-contrib/easytier-magisk/service.sh
Normal file
20
easytier-contrib/easytier-magisk/service.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
#!/data/adb/magisk/busybox sh
# Magisk service script: start easytier-core and keep it running until the
# module is disabled in the Magisk app (which creates $MODDIR/disable).
MODDIR=${0%/*}
# MODDIR="$(dirname $(readlink -f "$0"))"
mkdir -p ${MODDIR}/log
chmod 755 ${MODDIR}/*

echo $MODDIR >> ${MODDIR}/log/start.log

echo "Easytier 服务启动" >> ${MODDIR}/log/start.log

# 启动
nohup ${MODDIR}/easytier-core -c ${MODDIR}/config/config.conf >> ${MODDIR}/log/start.log 2>&1 &

# 检查是否启用模块 — poll until the module is disabled
while [ ! -f ${MODDIR}/disable ]; do
    sleep 2
done
# Fix: the original `ps -ef|grep ...` also matched the grep process itself,
# feeding a stale/bogus PID to `kill`. Use the same pgrep pattern as
# action.sh so only the easytier-core process is matched.
PID=$(pgrep -f "^${MODDIR}/easytier-core -c ${MODDIR}/config/config.conf")
kill $PID
echo "Easytier 服务停止" >> ${MODDIR}/log/start.log
||||
2
easytier-contrib/easytier-magisk/uninstall.sh
Normal file
2
easytier-contrib/easytier-magisk/uninstall.sh
Normal file
@@ -0,0 +1,2 @@
|
||||
# Magisk uninstall hook: remove everything under the module directory.
MODDIR=${0%/*}
# Quote the expansion and guard with :? so an empty MODDIR aborts instead
# of expanding to `rm -rf /*`-style disasters.
rm -rf "${MODDIR:?}"/*
|
||||
@@ -113,3 +113,4 @@ event:
|
||||
VpnPortalClientDisconnected: VPN门户客户端已断开连接
|
||||
DhcpIpv4Changed: DHCP IPv4地址更改
|
||||
DhcpIpv4Conflicted: DHCP IPv4地址冲突
|
||||
PortForwardAdded: 端口转发添加
|
||||
|
||||
@@ -112,3 +112,4 @@ event:
|
||||
VpnPortalClientDisconnected: VpnPortalClientDisconnected
|
||||
DhcpIpv4Changed: DhcpIpv4Changed
|
||||
DhcpIpv4Conflicted: DhcpIpv4Conflicted
|
||||
PortForwardAdded: PortForwardAdded
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "easytier-gui",
|
||||
"type": "module",
|
||||
"version": "2.2.2",
|
||||
"version": "2.2.4",
|
||||
"private": true,
|
||||
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
|
||||
"scripts": {
|
||||
@@ -13,7 +13,7 @@
|
||||
"lint:fix": "eslint . --ignore-pattern src-tauri --fix"
|
||||
},
|
||||
"dependencies": {
|
||||
"@primevue/themes": "^4.2.1",
|
||||
"@primevue/themes": "4.3.3",
|
||||
"@tauri-apps/plugin-autostart": "2.0.0",
|
||||
"@tauri-apps/plugin-clipboard-manager": "2.0.0",
|
||||
"@tauri-apps/plugin-os": "2.0.0",
|
||||
@@ -24,7 +24,7 @@
|
||||
"easytier-frontend-lib": "workspace:*",
|
||||
"ip-num": "1.5.1",
|
||||
"pinia": "^2.2.4",
|
||||
"primevue": "^4.2.1",
|
||||
"primevue": "4.3.3",
|
||||
"tauri-plugin-vpnservice-api": "workspace:*",
|
||||
"vue": "^3.5.12",
|
||||
"vue-router": "^4.4.5"
|
||||
@@ -32,7 +32,7 @@
|
||||
"devDependencies": {
|
||||
"@antfu/eslint-config": "^3.7.3",
|
||||
"@intlify/unplugin-vue-i18n": "^5.2.0",
|
||||
"@primevue/auto-import-resolver": "^4.1.0",
|
||||
"@primevue/auto-import-resolver": "4.3.3",
|
||||
"@tauri-apps/api": "2.1.0",
|
||||
"@tauri-apps/cli": "2.1.0",
|
||||
"@types/default-gateway": "^7.2.2",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "easytier-gui"
|
||||
version = "2.2.2"
|
||||
version = "2.3.0"
|
||||
description = "EasyTier GUI"
|
||||
authors = ["you"]
|
||||
edition = "2021"
|
||||
@@ -14,6 +14,13 @@ crate-type = ["staticlib", "cdylib", "rlib"]
|
||||
[build-dependencies]
|
||||
tauri-build = { version = "2.0.0-rc", features = [] }
|
||||
|
||||
# enable thunk-rs when compiling for x86_64 or i686 windows
|
||||
[target.x86_64-pc-windows-msvc.build-dependencies]
|
||||
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
|
||||
|
||||
[target.i686-pc-windows-msvc.build-dependencies]
|
||||
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
|
||||
|
||||
[dependencies]
|
||||
# wry 0.47 may crash on android, see https://github.com/EasyTier/EasyTier/issues/527
|
||||
tauri = { version = "=2.0.6", features = [
|
||||
@@ -53,4 +60,4 @@ tauri-plugin-autostart = "2.0"
|
||||
custom-protocol = ["tauri/custom-protocol"]
|
||||
|
||||
[target.'cfg(not(any(target_os = "android", target_os = "ios")))'.dependencies]
|
||||
tauri-plugin-single-instance = "2.0.0-rc.0"
|
||||
tauri-plugin-single-instance = "2.2.3"
|
||||
|
||||
@@ -1,3 +1,12 @@
|
||||
fn main() {
|
||||
// enable thunk-rs when target os is windows and arch is x86_64 or i686
|
||||
#[cfg(target_os = "windows")]
|
||||
if !std::env::var("TARGET")
|
||||
.unwrap_or_default()
|
||||
.contains("aarch64")
|
||||
{
|
||||
thunk::thunk();
|
||||
}
|
||||
|
||||
tauri_build::build();
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"createUpdaterArtifacts": false
|
||||
},
|
||||
"productName": "easytier-gui",
|
||||
"version": "2.2.2",
|
||||
"version": "2.3.0",
|
||||
"identifier": "com.kkrainbow.easytier",
|
||||
"plugins": {},
|
||||
"app": {
|
||||
|
||||
@@ -132,6 +132,14 @@ async function onNetworkInstanceChange() {
|
||||
return
|
||||
}
|
||||
|
||||
// if use no tun mode, stop the vpn service
|
||||
const no_tun = networkStore.isNoTunEnabled(insts[0])
|
||||
if (no_tun) {
|
||||
console.error('no tun mode, stop vpn service')
|
||||
await doStopVpn()
|
||||
return
|
||||
}
|
||||
|
||||
let network_length = curNetworkInfo?.my_node_info?.virtual_ipv4.network_length
|
||||
if (!network_length) {
|
||||
network_length = 24
|
||||
|
||||
@@ -128,6 +128,13 @@ export const useNetworkStore = defineStore('networkStore', {
|
||||
}
|
||||
this.saveAutoStartInstIdsToLocalStorage()
|
||||
},
|
||||
|
||||
isNoTunEnabled(instanceId: string): boolean {
|
||||
const cfg = this.networkList.find((cfg) => cfg.instance_id === instanceId)
|
||||
if (!cfg)
|
||||
return false
|
||||
return cfg.no_tun ?? false
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
|
||||
@@ -45,3 +45,11 @@
|
||||
border-radius: 4px;
|
||||
background-color: #0000005d;
|
||||
}
|
||||
|
||||
.p-password {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.p-password>input {
|
||||
width: 100%;
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "easytier-web"
|
||||
version = "2.2.2"
|
||||
version = "2.3.0"
|
||||
edition = "2021"
|
||||
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
|
||||
|
||||
@@ -18,13 +18,18 @@ axum = { version = "0.7", features = ["macros"] }
|
||||
axum-login = { version = "0.16" }
|
||||
password-auth = { version = "1.0.0" }
|
||||
axum-messages = "0.7.0"
|
||||
axum-embed = { version = "0.1.0", optional = true }
|
||||
tower-sessions-sqlx-store = { version = "0.14.1", features = ["sqlite"] }
|
||||
tower-sessions = { version = "0.13.0", default-features = false, features = [
|
||||
"signed",
|
||||
] }
|
||||
tower-http = { version = "0.6", features = ["cors", "compression-full"] }
|
||||
sqlx = { version = "0.8", features = ["sqlite"] }
|
||||
sea-orm = { version = "1.1", features = [ "sqlx-sqlite", "runtime-tokio-rustls", "macros" ] }
|
||||
sea-orm = { version = "1.1", features = [
|
||||
"sqlx-sqlite",
|
||||
"runtime-tokio-rustls",
|
||||
"macros",
|
||||
] }
|
||||
sea-orm-migration = { version = "1.1" }
|
||||
|
||||
|
||||
@@ -32,7 +37,7 @@ sea-orm-migration = { version = "1.1" }
|
||||
rust-embed = { version = "8.5.0", features = ["debug-embed"] }
|
||||
base64 = "0.22"
|
||||
rand = "0.8"
|
||||
image = {version="0.24", default-features = false, features = ["png"]}
|
||||
image = { version = "0.24", default-features = false, features = ["png"] }
|
||||
rusttype = "0.9.3"
|
||||
imageproc = "0.23.0"
|
||||
|
||||
@@ -55,3 +60,14 @@ uuid = { version = "1.5.0", features = [
|
||||
] }
|
||||
|
||||
chrono = { version = "0.4.37", features = ["serde"] }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
embed = ["dep:axum-embed"]
|
||||
|
||||
# enable thunk-rs when compiling for x86_64 or i686 windows
|
||||
[target.x86_64-pc-windows-msvc.build-dependencies]
|
||||
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
|
||||
|
||||
[target.i686-pc-windows-msvc.build-dependencies]
|
||||
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
|
||||
|
||||
7
easytier-web/build.rs
Normal file
7
easytier-web/build.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
/// Build script: on non-aarch64 Windows targets, invoke the thunk-rs shim;
/// all other targets need no build-time work.
fn main() {
    // enable thunk-rs when target os is windows and arch is x86_64 or i686
    #[cfg(target_os = "windows")]
    {
        let target = std::env::var("TARGET").unwrap_or_default();
        if !target.contains("aarch64") {
            thunk::thunk();
        }
    }
}
|
||||
@@ -18,14 +18,14 @@
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@primevue/themes": "^4.2.1",
|
||||
"@primevue/themes": "4.3.3",
|
||||
"@vueuse/core": "^11.1.0",
|
||||
"aura": "link:@primevue\\themes\\aura",
|
||||
"axios": "^1.7.7",
|
||||
"floating-vue": "^5.2",
|
||||
"ip-num": "1.5.1",
|
||||
"primeicons": "^7.0.0",
|
||||
"primevue": "^4.2.1",
|
||||
"primevue": "4.3.3",
|
||||
"tailwindcss-primeui": "^0.3.4",
|
||||
"ts-md5": "^1.3.1",
|
||||
"uuid": "^11.0.2",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
<script setup lang="ts">
|
||||
import InputGroup from 'primevue/inputgroup'
|
||||
import InputGroupAddon from 'primevue/inputgroupaddon'
|
||||
import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button } from 'primevue'
|
||||
import { SelectButton, Checkbox, InputText, InputNumber, AutoComplete, Panel, Divider, ToggleButton, Button, Password } from 'primevue'
|
||||
import { DEFAULT_NETWORK_CONFIG, NetworkConfig, NetworkingMethod } from '../types/network'
|
||||
import { defineProps, defineEmits, ref, } from 'vue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
@@ -155,6 +155,8 @@ const bool_flags: BoolFlag[] = [
|
||||
{ field: 'multi_thread', help: 'multi_thread_help' },
|
||||
{ field: 'proxy_forward_by_system', help: 'proxy_forward_by_system_help' },
|
||||
{ field: 'disable_encryption', help: 'disable_encryption_help' },
|
||||
{ field: 'disable_udp_hole_punching', help: 'disable_udp_hole_punching_help' },
|
||||
{ field: 'enable_magic_dns', help: 'enable_magic_dns_help' },
|
||||
]
|
||||
|
||||
</script>
|
||||
@@ -196,8 +198,8 @@ const bool_flags: BoolFlag[] = [
|
||||
</div>
|
||||
<div class="flex flex-col gap-2 basis-5/12 grow">
|
||||
<label for="network_secret">{{ t('network_secret') }}</label>
|
||||
<InputText id="network_secret" v-model="curNetwork.network_secret"
|
||||
aria-describedby="network_secret-help" />
|
||||
<Password id="network_secret" v-model="curNetwork.network_secret"
|
||||
aria-describedby="network_secret-help" toggleMask :feedback="false"/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -309,6 +311,18 @@ const bool_flags: BoolFlag[] = [
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-row gap-x-9 flex-wrap">
|
||||
<div class="flex flex-col gap-2 basis-5/12 grow">
|
||||
<div class="flex">
|
||||
<label for="mtu">{{ t('mtu') }}</label>
|
||||
<span class="pi pi-question-circle ml-2 self-center"
|
||||
v-tooltip="t('mtu_help')"></span>
|
||||
</div>
|
||||
<InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help"
|
||||
:format="false" :placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-row gap-x-9 flex-wrap">
|
||||
<div class="flex flex-col gap-2 basis-5/12 grow">
|
||||
<div class="flex">
|
||||
@@ -375,6 +389,18 @@ const bool_flags: BoolFlag[] = [
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-row gap-x-9 flex-wrap w-full">
|
||||
<div class="flex flex-col gap-2 grow p-fluid">
|
||||
<div class="flex">
|
||||
<label for="mapped_listeners">{{ t('mapped_listeners') }}</label>
|
||||
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mapped_listeners_help')"></span>
|
||||
</div>
|
||||
<AutoComplete id="mapped_listeners" v-model="curNetwork.mapped_listeners"
|
||||
:placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full"
|
||||
multiple fluid :suggestions="peerSuggestions" @complete="searchPeerSuggestions" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</Panel>
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ import { NetworkInstance, type NodeInfo, type PeerRoutePair } from '../types/net
|
||||
import { useI18n } from 'vue-i18n';
|
||||
import { computed, onMounted, onUnmounted, ref } from 'vue';
|
||||
import { ipv4InetToString, ipv4ToString, ipv6ToString } from '../modules/utils';
|
||||
import { DataTable, Column, Tag, Chip, Button, Dialog, ScrollPanel, Timeline, Divider, Panel, } from 'primevue';
|
||||
import { DataTable, Column, Tag, Chip, Button, Dialog, ScrollPanel, Timeline, Divider, Card, } from 'primevue';
|
||||
|
||||
const props = defineProps<{
|
||||
curNetworkInst: NetworkInstance | null,
|
||||
@@ -303,15 +303,15 @@ function showEventLogs() {
|
||||
|
||||
<template>
|
||||
<div class="frontend-lib">
|
||||
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" class="w-2/3 h-auto max-h-full"
|
||||
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" class="w-full h-auto max-h-full"
|
||||
:baseZIndex="2000">
|
||||
<ScrollPanel v-if="dialogHeader === 'vpn_portal_config'" class="w-2/3">
|
||||
<ScrollPanel v-if="dialogHeader === 'vpn_portal_config'">
|
||||
<pre>{{ dialogContent }}</pre>
|
||||
</ScrollPanel>
|
||||
<Timeline v-else :value="dialogContent">
|
||||
<template #opposite="slotProps">
|
||||
<small class="text-surface-500 dark:text-surface-400">{{ useTimeAgo(Date.parse(slotProps.item.time))
|
||||
}}</small>
|
||||
}}</small>
|
||||
</template>
|
||||
<template #content="slotProps">
|
||||
<HumanEvent :event="slotProps.item.event" />
|
||||
@@ -319,101 +319,107 @@ function showEventLogs() {
|
||||
</Timeline>
|
||||
</Dialog>
|
||||
|
||||
<Panel v-if="curNetworkInst?.error_msg">
|
||||
<template #header>
|
||||
<Card v-if="curNetworkInst?.error_msg">
|
||||
<template #title>
|
||||
Run Network Error
|
||||
</template>
|
||||
<div class="flex flex-col gap-y-5">
|
||||
<div class="text-red-500">
|
||||
{{ curNetworkInst.error_msg }}
|
||||
<template #content>
|
||||
<div class="flex flex-col gap-y-5">
|
||||
<div class="text-red-500">
|
||||
{{ curNetworkInst.error_msg }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</Panel>
|
||||
</template>
|
||||
</Card>
|
||||
|
||||
<template v-else>
|
||||
<Panel>
|
||||
<template #header>
|
||||
<Card>
|
||||
<template #title>
|
||||
{{ t('my_node_info') }}
|
||||
</template>
|
||||
<div class="flex w-full flex-col gap-y-5">
|
||||
<div class="m-0 flex flex-row justify-center gap-x-5">
|
||||
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid green">
|
||||
<div class="font-bold">
|
||||
{{ t('peer_count') }}
|
||||
<template #content>
|
||||
<div class="flex w-full flex-col gap-y-5">
|
||||
<div class="m-0 flex flex-row justify-center gap-x-5">
|
||||
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid green">
|
||||
<div class="font-bold">
|
||||
{{ t('peer_count') }}
|
||||
</div>
|
||||
<div class="text-5xl mt-1">
|
||||
{{ peerCount }}
|
||||
</div>
|
||||
</div>
|
||||
<div class="text-5xl mt-1">
|
||||
{{ peerCount }}
|
||||
|
||||
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid purple">
|
||||
<div class="font-bold">
|
||||
{{ t('upload') }}
|
||||
</div>
|
||||
<div class="text-xl mt-2">
|
||||
{{ txRate }}/s
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid fuchsia">
|
||||
<div class="font-bold">
|
||||
{{ t('download') }}
|
||||
</div>
|
||||
<div class="text-xl mt-2">
|
||||
{{ rxRate }}/s
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid purple">
|
||||
<div class="font-bold">
|
||||
{{ t('upload') }}
|
||||
</div>
|
||||
<div class="text-xl mt-2">
|
||||
{{ txRate }}/s
|
||||
</div>
|
||||
<div class="flex flex-row items-center flex-wrap w-full max-h-40 overflow-scroll">
|
||||
<Chip v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon"
|
||||
class="mr-2 mt-2 text-sm" />
|
||||
</div>
|
||||
|
||||
<div class="rounded-full w-32 h-32 flex flex-col items-center pt-6" style="border: 1px solid fuchsia">
|
||||
<div class="font-bold">
|
||||
{{ t('download') }}
|
||||
</div>
|
||||
<div class="text-xl mt-2">
|
||||
{{ rxRate }}/s
|
||||
</div>
|
||||
<div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm">
|
||||
<Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" />
|
||||
<Button severity="info" :label="t('show_event_log')" @click="showEventLogs" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-row items-center flex-wrap w-full max-h-40 overflow-scroll">
|
||||
<Chip v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon"
|
||||
class="mr-2 mt-2 text-sm" />
|
||||
</div>
|
||||
|
||||
<div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm">
|
||||
<Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" />
|
||||
<Button severity="info" :label="t('show_event_log')" @click="showEventLogs" />
|
||||
</div>
|
||||
</div>
|
||||
</Panel>
|
||||
</template>
|
||||
</Card>
|
||||
|
||||
<Divider />
|
||||
|
||||
<Panel>
|
||||
<template #header>
|
||||
<Card>
|
||||
<template #title>
|
||||
{{ t('peer_info') }}
|
||||
</template>
|
||||
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full">
|
||||
<Column :field="ipFormat" :header="t('virtual_ipv4')" />
|
||||
<Column :header="t('hostname')">
|
||||
<template #body="slotProps">
|
||||
<div v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server"
|
||||
v-tooltip="slotProps.data.route.hostname">
|
||||
{{
|
||||
slotProps.data.route.hostname }}
|
||||
</div>
|
||||
<div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1">
|
||||
<Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info">
|
||||
{{ t('status.server') }}
|
||||
</Tag>
|
||||
<Tag v-if="slotProps.data.route.feature_flag.avoid_relay_data" severity="warn" value="Warn">
|
||||
{{ t('status.relay') }}
|
||||
</Tag>
|
||||
</div>
|
||||
</template>
|
||||
</Column>
|
||||
<Column :field="routeCost" :header="t('route_cost')" />
|
||||
<Column :field="latencyMs" :header="t('latency')" />
|
||||
<Column :field="txBytes" :header="t('upload_bytes')" />
|
||||
<Column :field="rxBytes" :header="t('download_bytes')" />
|
||||
<Column :field="lossRate" :header="t('loss_rate')" />
|
||||
<Column :header="t('status.version')">
|
||||
<template #body="slotProps">
|
||||
<span>{{ version(slotProps.data) }}</span>
|
||||
</template>
|
||||
</Column>
|
||||
</DataTable>
|
||||
</Panel>
|
||||
<template #content>
|
||||
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-class="w-full">
|
||||
<Column :field="ipFormat" :header="t('virtual_ipv4')" />
|
||||
<Column :header="t('hostname')">
|
||||
<template #body="slotProps">
|
||||
<div v-if="!slotProps.data.route.cost || !slotProps.data.route.feature_flag.is_public_server"
|
||||
v-tooltip="slotProps.data.route.hostname">
|
||||
{{
|
||||
slotProps.data.route.hostname }}
|
||||
</div>
|
||||
<div v-else v-tooltip="slotProps.data.route.hostname" class="space-x-1">
|
||||
<Tag v-if="slotProps.data.route.feature_flag.is_public_server" severity="info" value="Info">
|
||||
{{ t('status.server') }}
|
||||
</Tag>
|
||||
<Tag v-if="slotProps.data.route.feature_flag.avoid_relay_data" severity="warn" value="Warn">
|
||||
{{ t('status.relay') }}
|
||||
</Tag>
|
||||
</div>
|
||||
</template>
|
||||
</Column>
|
||||
<Column :field="routeCost" :header="t('route_cost')" />
|
||||
<Column :field="latencyMs" :header="t('latency')" />
|
||||
<Column :field="txBytes" :header="t('upload_bytes')" />
|
||||
<Column :field="rxBytes" :header="t('download_bytes')" />
|
||||
<Column :field="lossRate" :header="t('loss_rate')" />
|
||||
<Column :header="t('status.version')">
|
||||
<template #body="slotProps">
|
||||
<span>{{ version(slotProps.data) }}</span>
|
||||
</template>
|
||||
</Column>
|
||||
</DataTable>
|
||||
</template>
|
||||
</Card>
|
||||
</template>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
@@ -109,6 +109,13 @@ proxy_forward_by_system_help: 通过系统内核转发子网代理数据包,
|
||||
disable_encryption: 禁用加密
|
||||
disable_encryption_help: 禁用对等节点通信的加密,默认为false,必须与对等节点相同
|
||||
|
||||
disable_udp_hole_punching: 禁用UDP打洞
|
||||
disable_udp_hole_punching_help: 禁用UDP打洞功能
|
||||
|
||||
enable_magic_dns: 启用魔法DNS
|
||||
enable_magic_dns_help: |
|
||||
启用魔法DNS,允许通过EasyTier的DNS服务器访问其他节点的虚拟IPv4地址, 如 node1.et.net。
|
||||
|
||||
relay_network_whitelist: 网络白名单
|
||||
relay_network_whitelist_help: |
|
||||
仅转发白名单网络的流量,支持通配符字符串。多个网络名称间可以使用英文空格间隔。
|
||||
@@ -125,6 +132,16 @@ socks5_help: |
|
||||
exit_nodes: 出口节点列表
|
||||
exit_nodes_help: 转发所有流量的出口节点,虚拟IPv4地址,优先级由列表顺序决定
|
||||
|
||||
mtu: MTU
|
||||
mtu_help: |
|
||||
TUN设备的MTU,默认为非加密时为1380,加密时为1360。范围:400-1380
|
||||
mtu_placeholder: 留空为默认值1380
|
||||
|
||||
mapped_listeners: 监听映射
|
||||
mapped_listeners_help: |
|
||||
手动指定监听器的公网地址,其他节点可以使用该地址连接到本节点。
|
||||
例如:tcp://123.123.123.123:11223,可以指定多个。
|
||||
|
||||
status:
|
||||
version: 内核版本
|
||||
local: 本机
|
||||
@@ -169,4 +186,4 @@ event:
|
||||
VpnPortalClientDisconnected: VPN门户客户端已断开连接
|
||||
DhcpIpv4Changed: DHCP IPv4地址更改
|
||||
DhcpIpv4Conflicted: DHCP IPv4地址冲突
|
||||
|
||||
PortForwardAdded: 端口转发添加
|
||||
|
||||
@@ -108,6 +108,13 @@ proxy_forward_by_system_help: Forward packet to proxy networks via system kernel
|
||||
disable_encryption: Disable Encryption
|
||||
disable_encryption_help: Disable encryption for peers communication, default is false, must be same with peers
|
||||
|
||||
disable_udp_hole_punching: Disable UDP Hole Punching
|
||||
disable_udp_hole_punching_help: Disable udp hole punching
|
||||
|
||||
enable_magic_dns: Enable Magic DNS
|
||||
enable_magic_dns_help: |
|
||||
Enable magic dns, all nodes in the network can access each other by domain name, e.g.: node1.et.net.
|
||||
|
||||
relay_network_whitelist: Network Whitelist
|
||||
relay_network_whitelist_help: |
|
||||
Only forward traffic from the whitelist networks, supporting wildcard strings, multiple network names can be separated by spaces.
|
||||
@@ -125,6 +132,16 @@ socks5_help: |
|
||||
exit_nodes: Exit Nodes
|
||||
exit_nodes_help: Exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list
|
||||
|
||||
mtu: MTU
|
||||
mtu_help: |
|
||||
MTU of the TUN device, default is 1380 for non-encryption, 1360 for encryption. Range:400-1380
|
||||
mtu_placeholder: Leave blank as default value 1380
|
||||
|
||||
mapped_listeners: Map Listeners
|
||||
mapped_listeners_help: |
|
||||
Manually specify the public address of the listener, other nodes can use this address to connect to this node.
|
||||
e.g.: tcp://123.123.123.123:11223, can specify multiple.
|
||||
|
||||
status:
|
||||
version: Version
|
||||
local: Local
|
||||
@@ -169,3 +186,4 @@ event:
|
||||
VpnPortalClientDisconnected: VpnPortalClientDisconnected
|
||||
DhcpIpv4Changed: DhcpIpv4Changed
|
||||
DhcpIpv4Conflicted: DhcpIpv4Conflicted
|
||||
PortForwardAdded: PortForwardAdded
|
||||
|
||||
@@ -47,6 +47,7 @@ export interface NetworkConfig {
|
||||
multi_thread?: boolean
|
||||
proxy_forward_by_system?: boolean
|
||||
disable_encryption?: boolean
|
||||
disable_udp_hole_punching?: boolean
|
||||
|
||||
enable_relay_network_whitelist?: boolean
|
||||
relay_network_whitelist: string[]
|
||||
@@ -58,6 +59,11 @@ export interface NetworkConfig {
|
||||
|
||||
enable_socks5?: boolean
|
||||
socks5_port: number
|
||||
|
||||
mtu: number | null
|
||||
mapped_listeners: string[]
|
||||
|
||||
enable_magic_dns?: boolean
|
||||
}
|
||||
|
||||
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
|
||||
@@ -104,6 +110,7 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
|
||||
multi_thread: true,
|
||||
proxy_forward_by_system: false,
|
||||
disable_encryption: false,
|
||||
disable_udp_hole_punching: false,
|
||||
enable_relay_network_whitelist: false,
|
||||
relay_network_whitelist: [],
|
||||
enable_manual_routes: false,
|
||||
@@ -111,6 +118,9 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
|
||||
exit_nodes: [],
|
||||
enable_socks5: false,
|
||||
socks5_port: 1080,
|
||||
mtu: null,
|
||||
mapped_listeners: [],
|
||||
enable_magic_dns: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -257,4 +267,6 @@ export enum EventType {
|
||||
|
||||
DhcpIpv4Changed = 'DhcpIpv4Changed', // ipv4 | null, ipv4 | null
|
||||
DhcpIpv4Conflicted = 'DhcpIpv4Conflicted', // ipv4 | null
|
||||
|
||||
PortForwardAdded = 'PortForwardAdded', // PortForwardConfigPb
|
||||
}
|
||||
|
||||
@@ -9,11 +9,11 @@
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"@primevue/themes": "^4.2.1",
|
||||
"@primevue/themes": "4.3.3",
|
||||
"aura": "link:@primevue/themes/aura",
|
||||
"axios": "^1.7.7",
|
||||
"easytier-frontend-lib": "workspace:*",
|
||||
"primevue": "^4.2.1",
|
||||
"primevue": "4.3.3",
|
||||
"tailwindcss-primeui": "^0.3.4",
|
||||
"vue": "^3.5.12",
|
||||
"vue-router": "4"
|
||||
|
||||
@@ -1,16 +1,32 @@
|
||||
<script setup lang="ts">
|
||||
import { NetworkTypes } from 'easytier-frontend-lib';
|
||||
import { ref } from 'vue';
|
||||
import {computed, ref} from 'vue';
|
||||
import { Api } from 'easytier-frontend-lib'
|
||||
import {AutoComplete, Divider} from "primevue";
|
||||
import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host"
|
||||
|
||||
const defaultApiHost = 'https://config-server.easytier.cn'
|
||||
const api = new Api.ApiClient(defaultApiHost);
|
||||
const api = computed<Api.ApiClient>(() => new Api.ApiClient(apiHost.value));
|
||||
|
||||
|
||||
const apiHost = ref<string>(getInitialApiHost())
|
||||
const apiHostSuggestions = ref<Array<string>>([])
|
||||
const apiHostSearch = async (event: { query: string }) => {
|
||||
apiHostSuggestions.value = [];
|
||||
let hosts = cleanAndLoadApiHosts();
|
||||
if (event.query) {
|
||||
apiHostSuggestions.value.push(event.query);
|
||||
}
|
||||
hosts.forEach((host) => {
|
||||
apiHostSuggestions.value.push(host.value);
|
||||
});
|
||||
}
|
||||
|
||||
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
|
||||
const toml_config = ref<string>("Press 'Run Network' to generate TOML configuration");
|
||||
|
||||
const generateConfig = (config: NetworkTypes.NetworkConfig) => {
|
||||
api.generate_config({
|
||||
saveApiHost(apiHost.value)
|
||||
api.value?.generate_config({
|
||||
config: config
|
||||
}).then((res) => {
|
||||
if (res.error) {
|
||||
@@ -29,6 +45,14 @@ const generateConfig = (config: NetworkTypes.NetworkConfig) => {
|
||||
<div class="flex items-center justify-center m-5">
|
||||
<div class="sm:block md:flex w-full">
|
||||
<div class="sm:w-full md:w-1/2 p-4">
|
||||
<div class="flex flex-col">
|
||||
<div class="w-11/12 self-center ">
|
||||
<label>ApiHost</label>
|
||||
<AutoComplete id="api-host" v-model="apiHost" dropdown :suggestions="apiHostSuggestions"
|
||||
@complete="apiHostSearch" class="w-full" />
|
||||
<Divider />
|
||||
</div>
|
||||
</div>
|
||||
<Config :cur-network="newNetworkConfig" @run-network="generateConfig" />
|
||||
</div>
|
||||
<div class="sm:w-full md:w-1/2 p-4 bg-gray-100">
|
||||
|
||||
@@ -160,6 +160,7 @@ const createNewNetwork = async () => {
|
||||
|
||||
const newNetwork = () => {
|
||||
newNetworkConfig.value = NetworkTypes.DEFAULT_NETWORK_CONFIG();
|
||||
newNetworkConfig.value.hostname = deviceInfo.value?.hostname;
|
||||
isEditing.value = false;
|
||||
showCreateNetworkDialog.value = true;
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import { Card, InputText, Password, Button, AutoComplete } from 'primevue';
|
||||
import { useRouter } from 'vue-router';
|
||||
import { useToast } from 'primevue/usetoast';
|
||||
import { Api } from 'easytier-frontend-lib';
|
||||
import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host"
|
||||
|
||||
defineProps<{
|
||||
isRegistering: boolean;
|
||||
@@ -20,56 +21,6 @@ const registerPassword = ref('');
|
||||
const captcha = ref('');
|
||||
const captchaSrc = computed(() => api.value.captcha_url());
|
||||
|
||||
interface ApiHost {
|
||||
value: string;
|
||||
usedAt: number;
|
||||
}
|
||||
|
||||
const isValidHttpUrl = (s: string): boolean => {
|
||||
let url;
|
||||
|
||||
try {
|
||||
url = new URL(s);
|
||||
} catch (_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return url.protocol === "http:" || url.protocol === "https:";
|
||||
}
|
||||
|
||||
const cleanAndLoadApiHosts = (): Array<ApiHost> => {
|
||||
const maxHosts = 10;
|
||||
const apiHosts = localStorage.getItem('apiHosts');
|
||||
if (apiHosts) {
|
||||
const hosts: Array<ApiHost> = JSON.parse(apiHosts);
|
||||
// sort by usedAt
|
||||
hosts.sort((a, b) => b.usedAt - a.usedAt);
|
||||
|
||||
// only keep the first 10
|
||||
if (hosts.length > maxHosts) {
|
||||
hosts.splice(maxHosts);
|
||||
}
|
||||
|
||||
localStorage.setItem('apiHosts', JSON.stringify(hosts));
|
||||
return hosts;
|
||||
} else {
|
||||
return [];
|
||||
}
|
||||
};
|
||||
|
||||
const saveApiHost = (host: string) => {
|
||||
console.log('Save API Host:', host);
|
||||
if (!isValidHttpUrl(host)) {
|
||||
console.error('Invalid API Host:', host);
|
||||
return;
|
||||
}
|
||||
|
||||
let hosts = cleanAndLoadApiHosts();
|
||||
const newHost: ApiHost = { value: host, usedAt: Date.now() };
|
||||
hosts = hosts.filter((h) => h.value !== host);
|
||||
hosts.push(newHost);
|
||||
localStorage.setItem('apiHosts', JSON.stringify(hosts));
|
||||
};
|
||||
|
||||
const onSubmit = async () => {
|
||||
// Add your login logic here
|
||||
@@ -100,16 +51,6 @@ const onRegister = async () => {
|
||||
}
|
||||
};
|
||||
|
||||
const getInitialApiHost = (): string => {
|
||||
const hosts = cleanAndLoadApiHosts();
|
||||
if (hosts.length > 0) {
|
||||
return hosts[0].value;
|
||||
} else {
|
||||
return defaultApiHost;
|
||||
}
|
||||
};
|
||||
|
||||
const defaultApiHost = 'https://config-server.easytier.cn'
|
||||
const apiHost = ref<string>(getInitialApiHost())
|
||||
const apiHostSuggestions = ref<Array<string>>([])
|
||||
const apiHostSearch = async (event: { query: string }) => {
|
||||
@@ -124,10 +65,7 @@ const apiHostSearch = async (event: { query: string }) => {
|
||||
}
|
||||
|
||||
onMounted(() => {
|
||||
let hosts = cleanAndLoadApiHosts();
|
||||
if (hosts.length === 0) {
|
||||
saveApiHost(defaultApiHost);
|
||||
}
|
||||
|
||||
});
|
||||
|
||||
</script>
|
||||
|
||||
64
easytier-web/frontend/src/modules/api-host.ts
Normal file
64
easytier-web/frontend/src/modules/api-host.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
const defaultApiHost = 'https://config-server.easytier.cn';
|
||||
|
||||
interface ApiHost {
|
||||
value: string;
|
||||
usedAt: number;
|
||||
}
|
||||
|
||||
const isValidHttpUrl = (s: string): boolean => {
|
||||
let url;
|
||||
|
||||
try {
|
||||
url = new URL(s);
|
||||
} catch (_) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return url.protocol === "http:" || url.protocol === "https:";
|
||||
};
|
||||
|
||||
const cleanAndLoadApiHosts = (): Array<ApiHost> => {
|
||||
const maxHosts = 10;
|
||||
const apiHosts = localStorage.getItem('apiHosts');
|
||||
if (apiHosts) {
|
||||
const hosts: Array<ApiHost> = JSON.parse(apiHosts);
|
||||
// sort by usedAt
|
||||
hosts.sort((a, b) => b.usedAt - a.usedAt);
|
||||
|
||||
// only keep the first 10
|
||||
if (hosts.length > maxHosts) {
|
||||
hosts.splice(maxHosts);
|
||||
}
|
||||
|
||||
localStorage.setItem('apiHosts', JSON.stringify(hosts));
|
||||
return hosts;
|
||||
} else {
|
||||
return [];
|
||||
}
|
||||
};
|
||||
|
||||
const saveApiHost = (host: string) => {
|
||||
console.log('Save API Host:', host);
|
||||
if (!isValidHttpUrl(host)) {
|
||||
console.error('Invalid API Host:', host);
|
||||
return;
|
||||
}
|
||||
|
||||
let hosts = cleanAndLoadApiHosts();
|
||||
const newHost: ApiHost = {value: host, usedAt: Date.now()};
|
||||
hosts = hosts.filter((h) => h.value !== host);
|
||||
hosts.push(newHost);
|
||||
localStorage.setItem('apiHosts', JSON.stringify(hosts));
|
||||
};
|
||||
|
||||
const getInitialApiHost = (): string => {
|
||||
const hosts = cleanAndLoadApiHosts();
|
||||
if (hosts.length > 0) {
|
||||
return hosts[0].value;
|
||||
} else {
|
||||
saveApiHost(defaultApiHost)
|
||||
return defaultApiHost;
|
||||
}
|
||||
};
|
||||
|
||||
export {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost}
|
||||
@@ -1,9 +1,11 @@
|
||||
import { defineConfig } from 'vite'
|
||||
import vue from '@vitejs/plugin-vue'
|
||||
import { viteSingleFile } from "vite-plugin-singlefile"
|
||||
// import { viteSingleFile } from "vite-plugin-singlefile"
|
||||
|
||||
const WEB_BASE_URL = process.env.WEB_BASE_URL || '';
|
||||
|
||||
// https://vite.dev/config/
|
||||
export default defineConfig({
|
||||
base: '',
|
||||
plugins: [vue(), viteSingleFile()],
|
||||
base: WEB_BASE_URL,
|
||||
plugins: [vue(),/* viteSingleFile() */],
|
||||
})
|
||||
|
||||
@@ -22,3 +22,9 @@ cli:
|
||||
api_server_port:
|
||||
en: "The port to listen for the restful server, acting as ApiHost and used by the web frontend"
|
||||
zh-CN: "restful 服务器的监听端口,作为 ApiHost 并被 web 前端使用"
|
||||
web_server_port:
|
||||
en: "The port to listen for the web dashboard server, default is same as the api server port"
|
||||
zh-CN: "web dashboard 服务器的监听端口, 默认为与 api 服务器端口相同"
|
||||
no_web:
|
||||
en: "Do not run the web dashboard server"
|
||||
zh-CN: "不运行 web dashboard 服务器"
|
||||
@@ -10,7 +10,7 @@ use easytier::{
|
||||
use session::Session;
|
||||
use storage::{Storage, StorageToken};
|
||||
|
||||
use crate::db::Db;
|
||||
use crate::db::{Db, UserIdInDb};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ClientManager {
|
||||
@@ -86,15 +86,21 @@ impl ClientManager {
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn get_session_by_machine_id(&self, machine_id: &uuid::Uuid) -> Option<Arc<Session>> {
|
||||
let c_url = self.storage.get_client_url_by_machine_id(machine_id)?;
|
||||
pub fn get_session_by_machine_id(
|
||||
&self,
|
||||
user_id: UserIdInDb,
|
||||
machine_id: &uuid::Uuid,
|
||||
) -> Option<Arc<Session>> {
|
||||
let c_url = self
|
||||
.storage
|
||||
.get_client_url_by_machine_id(user_id, machine_id)?;
|
||||
self.client_sessions
|
||||
.get(&c_url)
|
||||
.map(|item| item.value().clone())
|
||||
}
|
||||
|
||||
pub async fn list_machine_by_token(&self, token: String) -> Vec<url::Url> {
|
||||
self.storage.list_token_clients(&token)
|
||||
pub async fn list_machine_by_user_id(&self, user_id: UserIdInDb) -> Vec<url::Url> {
|
||||
self.storage.list_user_clients(user_id)
|
||||
}
|
||||
|
||||
pub async fn get_heartbeat_requests(&self, client_url: &url::Url) -> Option<HeartbeatRequest> {
|
||||
@@ -118,6 +124,7 @@ mod tests {
|
||||
},
|
||||
web_client::WebClient,
|
||||
};
|
||||
use sqlx::Executor;
|
||||
|
||||
use crate::{client_manager::ClientManager, db::Db};
|
||||
|
||||
@@ -127,8 +134,14 @@ mod tests {
|
||||
let mut mgr = ClientManager::new(Db::memory_db().await);
|
||||
mgr.serve(Box::new(listener)).await.unwrap();
|
||||
|
||||
mgr.db()
|
||||
.inner()
|
||||
.execute("INSERT INTO users (username, password) VALUES ('test', 'test')")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let connector = UdpTunnelConnector::new("udp://127.0.0.1:54333".parse().unwrap());
|
||||
let _c = WebClient::new(connector, "test");
|
||||
let _c = WebClient::new(connector, "test", "test");
|
||||
|
||||
wait_for_condition(
|
||||
|| async { mgr.client_sessions.len() == 1 },
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
use std::{fmt::Debug, str::FromStr as _, sync::Arc};
|
||||
|
||||
use anyhow::Context;
|
||||
use easytier::{
|
||||
common::scoped_task::ScopedTask,
|
||||
proto::{
|
||||
@@ -68,6 +69,66 @@ struct SessionRpcService {
|
||||
data: SharedSessionData,
|
||||
}
|
||||
|
||||
impl SessionRpcService {
|
||||
async fn handle_heartbeat(
|
||||
&self,
|
||||
req: HeartbeatRequest,
|
||||
) -> rpc_types::error::Result<HeartbeatResponse> {
|
||||
let mut data = self.data.write().await;
|
||||
|
||||
let Ok(storage) = Storage::try_from(data.storage.clone()) else {
|
||||
tracing::error!("Failed to get storage");
|
||||
return Ok(HeartbeatResponse {});
|
||||
};
|
||||
|
||||
let machine_id: uuid::Uuid =
|
||||
req.machine_id
|
||||
.clone()
|
||||
.map(Into::into)
|
||||
.ok_or(anyhow::anyhow!(
|
||||
"Machine id is not set correctly, expect uuid but got: {:?}",
|
||||
req.machine_id
|
||||
))?;
|
||||
|
||||
let user_id = storage
|
||||
.db()
|
||||
.get_user_id_by_token(req.user_token.clone())
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get user id by token from db: {:?}",
|
||||
req.user_token
|
||||
)
|
||||
})?
|
||||
.ok_or(anyhow::anyhow!(
|
||||
"User not found by token: {:?}",
|
||||
req.user_token
|
||||
))?;
|
||||
|
||||
if data.req.replace(req.clone()).is_none() {
|
||||
assert!(data.storage_token.is_none());
|
||||
data.storage_token = Some(StorageToken {
|
||||
token: req.user_token.clone().into(),
|
||||
client_url: data.client_url.clone(),
|
||||
machine_id,
|
||||
user_id,
|
||||
});
|
||||
}
|
||||
|
||||
let Ok(report_time) = chrono::DateTime::<chrono::Local>::from_str(&req.report_time) else {
|
||||
tracing::error!("Failed to parse report time: {:?}", req.report_time);
|
||||
return Ok(HeartbeatResponse {});
|
||||
};
|
||||
storage.update_client(
|
||||
data.storage_token.as_ref().unwrap().clone(),
|
||||
report_time.timestamp(),
|
||||
);
|
||||
|
||||
let _ = data.notifier.send(req);
|
||||
Ok(HeartbeatResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WebServerService for SessionRpcService {
|
||||
type Controller = BaseController;
|
||||
@@ -77,34 +138,13 @@ impl WebServerService for SessionRpcService {
|
||||
_: BaseController,
|
||||
req: HeartbeatRequest,
|
||||
) -> rpc_types::error::Result<HeartbeatResponse> {
|
||||
let mut data = self.data.write().await;
|
||||
if data.req.replace(req.clone()).is_none() {
|
||||
assert!(data.storage_token.is_none());
|
||||
data.storage_token = Some(StorageToken {
|
||||
token: req.user_token.clone().into(),
|
||||
client_url: data.client_url.clone(),
|
||||
machine_id: req
|
||||
.machine_id
|
||||
.clone()
|
||||
.map(Into::into)
|
||||
.unwrap_or(uuid::Uuid::new_v4()),
|
||||
});
|
||||
let ret = self.handle_heartbeat(req).await;
|
||||
if ret.is_err() {
|
||||
tracing::warn!("Failed to handle heartbeat: {:?}", ret);
|
||||
// sleep for a while to avoid client busy loop
|
||||
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
|
||||
}
|
||||
|
||||
if let Ok(storage) = Storage::try_from(data.storage.clone()) {
|
||||
let Ok(report_time) = chrono::DateTime::<chrono::Local>::from_str(&req.report_time)
|
||||
else {
|
||||
tracing::error!("Failed to parse report time: {:?}", req.report_time);
|
||||
return Ok(HeartbeatResponse {});
|
||||
};
|
||||
storage.update_client(
|
||||
data.storage_token.as_ref().unwrap().clone(),
|
||||
report_time.timestamp(),
|
||||
);
|
||||
}
|
||||
|
||||
let _ = data.notifier.send(req);
|
||||
Ok(HeartbeatResponse {})
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ use std::sync::{Arc, Weak};
|
||||
|
||||
use dashmap::DashMap;
|
||||
|
||||
use crate::db::Db;
|
||||
use crate::db::{Db, UserIdInDb};
|
||||
|
||||
// use this to maintain Storage
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
|
||||
@@ -10,21 +10,19 @@ pub struct StorageToken {
|
||||
pub token: String,
|
||||
pub client_url: url::Url,
|
||||
pub machine_id: uuid::Uuid,
|
||||
pub user_id: UserIdInDb,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct ClientInfo {
|
||||
client_url: url::Url,
|
||||
machine_id: uuid::Uuid,
|
||||
token: String,
|
||||
storage_token: StorageToken,
|
||||
report_time: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StorageInner {
|
||||
// some map for indexing
|
||||
token_clients_map: DashMap<String, DashMap<uuid::Uuid, ClientInfo>>,
|
||||
machine_client_url_map: DashMap<uuid::Uuid, ClientInfo>,
|
||||
user_clients_map: DashMap<UserIdInDb, DashMap<uuid::Uuid, ClientInfo>>,
|
||||
pub db: Db,
|
||||
}
|
||||
|
||||
@@ -43,8 +41,7 @@ impl TryFrom<WeakRefStorage> for Storage {
|
||||
impl Storage {
|
||||
pub fn new(db: Db) -> Self {
|
||||
Storage(Arc::new(StorageInner {
|
||||
token_clients_map: DashMap::new(),
|
||||
machine_client_url_map: DashMap::new(),
|
||||
user_clients_map: DashMap::new(),
|
||||
db,
|
||||
}))
|
||||
}
|
||||
@@ -54,17 +51,22 @@ impl Storage {
|
||||
machine_id: &uuid::Uuid,
|
||||
client_url: &url::Url,
|
||||
) {
|
||||
map.remove_if(&machine_id, |_, v| v.client_url == *client_url);
|
||||
map.remove_if(&machine_id, |_, v| {
|
||||
v.storage_token.client_url == *client_url
|
||||
});
|
||||
}
|
||||
|
||||
fn update_mid_to_client_info_map(
|
||||
map: &DashMap<uuid::Uuid, ClientInfo>,
|
||||
client_info: &ClientInfo,
|
||||
) {
|
||||
map.entry(client_info.machine_id)
|
||||
map.entry(client_info.storage_token.machine_id)
|
||||
.and_modify(|e| {
|
||||
if e.report_time < client_info.report_time {
|
||||
assert_eq!(e.machine_id, client_info.machine_id);
|
||||
assert_eq!(
|
||||
e.storage_token.machine_id,
|
||||
client_info.storage_token.machine_id
|
||||
);
|
||||
*e = client_info.clone();
|
||||
}
|
||||
})
|
||||
@@ -74,53 +76,51 @@ impl Storage {
|
||||
pub fn update_client(&self, stoken: StorageToken, report_time: i64) {
|
||||
let inner = self
|
||||
.0
|
||||
.token_clients_map
|
||||
.entry(stoken.token.clone())
|
||||
.user_clients_map
|
||||
.entry(stoken.user_id)
|
||||
.or_insert_with(DashMap::new);
|
||||
|
||||
let client_info = ClientInfo {
|
||||
client_url: stoken.client_url.clone(),
|
||||
machine_id: stoken.machine_id,
|
||||
token: stoken.token.clone(),
|
||||
storage_token: stoken.clone(),
|
||||
report_time,
|
||||
};
|
||||
|
||||
Self::update_mid_to_client_info_map(&inner, &client_info);
|
||||
Self::update_mid_to_client_info_map(&self.0.machine_client_url_map, &client_info);
|
||||
}
|
||||
|
||||
pub fn remove_client(&self, stoken: &StorageToken) {
|
||||
self.0.token_clients_map.remove_if(&stoken.token, |_, set| {
|
||||
Self::remove_mid_to_client_info_map(set, &stoken.machine_id, &stoken.client_url);
|
||||
set.is_empty()
|
||||
});
|
||||
|
||||
Self::remove_mid_to_client_info_map(
|
||||
&self.0.machine_client_url_map,
|
||||
&stoken.machine_id,
|
||||
&stoken.client_url,
|
||||
);
|
||||
self.0
|
||||
.user_clients_map
|
||||
.remove_if(&stoken.user_id, |_, set| {
|
||||
Self::remove_mid_to_client_info_map(set, &stoken.machine_id, &stoken.client_url);
|
||||
set.is_empty()
|
||||
});
|
||||
}
|
||||
|
||||
pub fn weak_ref(&self) -> WeakRefStorage {
|
||||
Arc::downgrade(&self.0)
|
||||
}
|
||||
|
||||
pub fn get_client_url_by_machine_id(&self, machine_id: &uuid::Uuid) -> Option<url::Url> {
|
||||
self.0
|
||||
.machine_client_url_map
|
||||
.get(&machine_id)
|
||||
.map(|info| info.client_url.clone())
|
||||
pub fn get_client_url_by_machine_id(
|
||||
&self,
|
||||
user_id: UserIdInDb,
|
||||
machine_id: &uuid::Uuid,
|
||||
) -> Option<url::Url> {
|
||||
self.0.user_clients_map.get(&user_id).and_then(|info_map| {
|
||||
info_map
|
||||
.get(machine_id)
|
||||
.map(|info| info.storage_token.client_url.clone())
|
||||
})
|
||||
}
|
||||
|
||||
pub fn list_token_clients(&self, token: &str) -> Vec<url::Url> {
|
||||
pub fn list_user_clients(&self, user_id: UserIdInDb) -> Vec<url::Url> {
|
||||
self.0
|
||||
.token_clients_map
|
||||
.get(token)
|
||||
.user_clients_map
|
||||
.get(&user_id)
|
||||
.map(|info_map| {
|
||||
info_map
|
||||
.iter()
|
||||
.map(|info| info.value().client_url.clone())
|
||||
.map(|info| info.value().storage_token.client_url.clone())
|
||||
.collect()
|
||||
})
|
||||
.unwrap_or_default()
|
||||
|
||||
@@ -12,7 +12,7 @@ use sqlx::{migrate::MigrateDatabase as _, types::chrono, Sqlite, SqlitePool};
|
||||
|
||||
use crate::migrator;
|
||||
|
||||
type UserIdInDb = i32;
|
||||
pub type UserIdInDb = i32;
|
||||
|
||||
pub enum ListNetworkProps {
|
||||
All,
|
||||
|
||||
@@ -5,7 +5,7 @@ extern crate rust_i18n;
|
||||
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::{command, Parser};
|
||||
use clap::Parser;
|
||||
use easytier::{
|
||||
common::{
|
||||
config::{ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, TomlConfigLoader},
|
||||
@@ -21,6 +21,9 @@ mod db;
|
||||
mod migrator;
|
||||
mod restful;
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
mod web;
|
||||
|
||||
rust_i18n::i18n!("locales", fallback = "en");
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
@@ -70,11 +73,25 @@ struct Cli {
|
||||
help = t!("cli.api_server_port").to_string(),
|
||||
)]
|
||||
api_server_port: u16,
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
#[arg(
|
||||
long,
|
||||
short='l',
|
||||
help = t!("cli.web_server_port").to_string(),
|
||||
)]
|
||||
web_server_port: Option<u16>,
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
#[arg(
|
||||
long,
|
||||
help = t!("cli.no_web").to_string(),
|
||||
default_value = "false"
|
||||
)]
|
||||
no_web: bool,
|
||||
}
|
||||
|
||||
pub fn get_listener_by_url(
|
||||
l: &url::Url,
|
||||
) -> Result<Box<dyn TunnelListener>, Error> {
|
||||
pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> {
|
||||
Ok(match l.scheme() {
|
||||
"tcp" => Box::new(TcpTunnelListener::new(l.clone())),
|
||||
"udp" => Box::new(UdpTunnelListener::new(l.clone())),
|
||||
@@ -106,20 +123,49 @@ async fn main() {
|
||||
let db = db::Db::new(cli.db).await.unwrap();
|
||||
|
||||
let listener = get_listener_by_url(
|
||||
&format!("{}://0.0.0.0:{}", cli.config_server_protocol, cli.config_server_port).parse().unwrap(),
|
||||
&format!(
|
||||
"{}://0.0.0.0:{}",
|
||||
cli.config_server_protocol, cli.config_server_port
|
||||
)
|
||||
.parse()
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let mut mgr = client_manager::ClientManager::new(db.clone());
|
||||
mgr.serve(listener).await.unwrap();
|
||||
let mgr = Arc::new(mgr);
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
let restful_also_serve_web = !cli.no_web
|
||||
&& (cli.web_server_port.is_none() || cli.web_server_port == Some(cli.api_server_port));
|
||||
|
||||
#[cfg(not(feature = "embed"))]
|
||||
let restful_also_serve_web = false;
|
||||
|
||||
let mut restful_server = restful::RestfulServer::new(
|
||||
format!("0.0.0.0:{}", cli.api_server_port).parse().unwrap(),
|
||||
mgr.clone(),
|
||||
db,
|
||||
restful_also_serve_web,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
restful_server.start().await.unwrap();
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
let mut web_server = web::WebServer::new(
|
||||
format!("0.0.0.0:{}", cli.web_server_port.unwrap_or(0))
|
||||
.parse()
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
if !cli.no_web && !restful_also_serve_web {
|
||||
web_server.start().await.unwrap();
|
||||
}
|
||||
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use axum::http::StatusCode;
|
||||
use axum::routing::post;
|
||||
use axum::{extract::State, routing::get, Json, Router};
|
||||
use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer};
|
||||
use axum_login::{login_required, AuthManagerLayerBuilder, AuthzBackend};
|
||||
use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend};
|
||||
use axum_messages::MessagesManagerLayer;
|
||||
use easytier::common::config::ConfigLoader;
|
||||
use easytier::common::scoped_task::ScopedTask;
|
||||
@@ -24,11 +24,16 @@ use tower_sessions::Expiry;
|
||||
use tower_sessions_sqlx_store::SqliteStore;
|
||||
use users::{AuthSession, Backend};
|
||||
|
||||
use crate::client_manager::session::Session;
|
||||
use crate::client_manager::storage::StorageToken;
|
||||
use crate::client_manager::ClientManager;
|
||||
use crate::db::Db;
|
||||
|
||||
/// Embed assets for web dashboard, build frontend first
|
||||
#[cfg(feature = "embed")]
|
||||
#[derive(rust_embed::RustEmbed, Clone)]
|
||||
#[folder = "frontend/dist/"]
|
||||
struct Assets;
|
||||
|
||||
pub struct RestfulServer {
|
||||
bind_addr: SocketAddr,
|
||||
client_mgr: Arc<ClientManager>,
|
||||
@@ -38,6 +43,8 @@ pub struct RestfulServer {
|
||||
delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>,
|
||||
|
||||
network_api: NetworkApi,
|
||||
|
||||
enable_web_embed: bool,
|
||||
}
|
||||
|
||||
type AppStateInner = Arc<ClientManager>;
|
||||
@@ -87,6 +94,7 @@ impl RestfulServer {
|
||||
bind_addr: SocketAddr,
|
||||
client_mgr: Arc<ClientManager>,
|
||||
db: Db,
|
||||
enable_web_embed: bool,
|
||||
) -> anyhow::Result<Self> {
|
||||
assert!(client_mgr.is_running());
|
||||
|
||||
@@ -99,20 +107,10 @@ impl RestfulServer {
|
||||
serve_task: None,
|
||||
delete_task: None,
|
||||
network_api,
|
||||
enable_web_embed,
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_session_by_machine_id(
|
||||
client_mgr: &ClientManager,
|
||||
machine_id: &uuid::Uuid,
|
||||
) -> Result<Arc<Session>, HttpHandleError> {
|
||||
let Some(result) = client_mgr.get_session_by_machine_id(machine_id) else {
|
||||
return Err((StatusCode::NOT_FOUND, other_error("No such session").into()));
|
||||
};
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
async fn handle_list_all_sessions(
|
||||
auth_session: AuthSession,
|
||||
State(client_mgr): AppState,
|
||||
@@ -135,9 +133,7 @@ impl RestfulServer {
|
||||
return Err((StatusCode::UNAUTHORIZED, other_error("No such user").into()));
|
||||
};
|
||||
|
||||
let machines = client_mgr
|
||||
.list_machine_by_token(user.tokens[0].clone())
|
||||
.await;
|
||||
let machines = client_mgr.list_machine_by_user_id(user.id().clone()).await;
|
||||
|
||||
Ok(GetSummaryJsonResp {
|
||||
device_count: machines.len() as u32,
|
||||
@@ -219,6 +215,15 @@ impl RestfulServer {
|
||||
.layer(tower_http::cors::CorsLayer::very_permissive())
|
||||
.layer(compression_layer);
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
let app = if self.enable_web_embed {
|
||||
use axum_embed::ServeEmbed;
|
||||
let service = ServeEmbed::<Assets>::new();
|
||||
app.fallback_service(service)
|
||||
} else {
|
||||
app
|
||||
};
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
});
|
||||
|
||||
@@ -5,7 +5,6 @@ use axum::http::StatusCode;
|
||||
use axum::routing::{delete, post};
|
||||
use axum::{extract::State, routing::get, Json, Router};
|
||||
use axum_login::AuthUser;
|
||||
use dashmap::DashSet;
|
||||
use easytier::launcher::NetworkConfig;
|
||||
use easytier::proto::common::Void;
|
||||
use easytier::proto::rpc_types::controller::BaseController;
|
||||
@@ -13,7 +12,7 @@ use easytier::proto::web::*;
|
||||
|
||||
use crate::client_manager::session::Session;
|
||||
use crate::client_manager::ClientManager;
|
||||
use crate::db::ListNetworkProps;
|
||||
use crate::db::{ListNetworkProps, UserIdInDb};
|
||||
|
||||
use super::users::AuthSession;
|
||||
use super::{
|
||||
@@ -81,12 +80,24 @@ impl NetworkApi {
|
||||
Self {}
|
||||
}
|
||||
|
||||
fn get_user_id(auth_session: &AuthSession) -> Result<UserIdInDb, (StatusCode, Json<Error>)> {
|
||||
let Some(user_id) = auth_session.user.as_ref().map(|x| x.id()) else {
|
||||
return Err((
|
||||
StatusCode::UNAUTHORIZED,
|
||||
other_error(format!("No user id found")).into(),
|
||||
));
|
||||
};
|
||||
Ok(user_id)
|
||||
}
|
||||
|
||||
async fn get_session_by_machine_id(
|
||||
auth_session: &AuthSession,
|
||||
client_mgr: &ClientManager,
|
||||
machine_id: &uuid::Uuid,
|
||||
) -> Result<Arc<Session>, HttpHandleError> {
|
||||
let Some(result) = client_mgr.get_session_by_machine_id(machine_id) else {
|
||||
let user_id = Self::get_user_id(auth_session)?;
|
||||
|
||||
let Some(result) = client_mgr.get_session_by_machine_id(user_id, machine_id) else {
|
||||
return Err((
|
||||
StatusCode::NOT_FOUND,
|
||||
other_error(format!("No such session: {}", machine_id)).into(),
|
||||
@@ -289,23 +300,13 @@ impl NetworkApi {
|
||||
auth_session: AuthSession,
|
||||
State(client_mgr): AppState,
|
||||
) -> Result<Json<ListMachineJsonResp>, HttpHandleError> {
|
||||
let tokens = auth_session
|
||||
.user
|
||||
.as_ref()
|
||||
.map(|x| x.tokens.clone())
|
||||
.unwrap_or_default();
|
||||
let user_id = Self::get_user_id(&auth_session)?;
|
||||
|
||||
let client_urls = DashSet::new();
|
||||
for token in tokens {
|
||||
let urls = client_mgr.list_machine_by_token(token).await;
|
||||
for url in urls {
|
||||
client_urls.insert(url);
|
||||
}
|
||||
}
|
||||
let client_urls = client_mgr.list_machine_by_user_id(user_id).await;
|
||||
|
||||
let mut machines = vec![];
|
||||
for item in client_urls.iter() {
|
||||
let client_url = item.key().clone();
|
||||
let client_url = item.clone();
|
||||
let session = client_mgr.get_heartbeat_requests(&client_url).await;
|
||||
machines.push(ListMachineItem {
|
||||
client_url: Some(client_url),
|
||||
|
||||
39
easytier-web/src/web/mod.rs
Normal file
39
easytier-web/src/web/mod.rs
Normal file
@@ -0,0 +1,39 @@
|
||||
use axum::Router;
|
||||
use easytier::common::scoped_task::ScopedTask;
|
||||
use rust_embed::RustEmbed;
|
||||
use std::net::SocketAddr;
|
||||
use axum_embed::ServeEmbed;
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
/// Embed assets for web dashboard, build frontend first
|
||||
#[derive(RustEmbed, Clone)]
|
||||
#[folder = "frontend/dist/"]
|
||||
struct Assets;
|
||||
|
||||
pub struct WebServer {
|
||||
bind_addr: SocketAddr,
|
||||
serve_task: Option<ScopedTask<()>>,
|
||||
}
|
||||
|
||||
impl WebServer {
|
||||
pub async fn new(bind_addr: SocketAddr) -> anyhow::Result<Self> {
|
||||
Ok(WebServer {
|
||||
bind_addr,
|
||||
serve_task: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start(&mut self) -> Result<(), anyhow::Error> {
|
||||
let listener = TcpListener::bind(self.bind_addr).await?;
|
||||
let service = ServeEmbed::<Assets>::new();
|
||||
let app = Router::new().fallback_service(service);
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
});
|
||||
|
||||
self.serve_task = Some(task.into());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,7 @@ name = "easytier"
|
||||
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
|
||||
homepage = "https://github.com/EasyTier/EasyTier"
|
||||
repository = "https://github.com/EasyTier/EasyTier"
|
||||
version = "2.2.2"
|
||||
version = "2.3.0"
|
||||
edition = "2021"
|
||||
authors = ["kkrainbow"]
|
||||
keywords = ["vpn", "p2p", "network", "easytier"]
|
||||
@@ -129,6 +129,7 @@ clap = { version = "4.5.30", features = [
|
||||
"unicode",
|
||||
"derive",
|
||||
"wrap_help",
|
||||
"env",
|
||||
] }
|
||||
|
||||
async-recursion = "1.0.5"
|
||||
@@ -136,7 +137,7 @@ async-recursion = "1.0.5"
|
||||
network-interface = "2.0"
|
||||
|
||||
# for ospf route
|
||||
petgraph = "0.7.1"
|
||||
petgraph = "0.8.1"
|
||||
|
||||
# for wireguard
|
||||
boringtun = { package = "boringtun-easytier", version = "0.6.1", optional = true }
|
||||
@@ -152,7 +153,7 @@ humansize = "2.1.3"
|
||||
|
||||
base64 = "0.22"
|
||||
|
||||
mimalloc-rust = { version = "0.2.1", optional = true }
|
||||
mimalloc-rust = { git = "https://github.com/EasyTier/mimalloc-rust", optional = true }
|
||||
|
||||
# mips
|
||||
atomic-shim = "0.2.0"
|
||||
@@ -162,8 +163,14 @@ smoltcp = { version = "0.12.0", optional = true, default-features = false, featu
|
||||
"medium-ip",
|
||||
"proto-ipv4",
|
||||
"proto-ipv6",
|
||||
"proto-ipv4-fragmentation",
|
||||
"fragmentation-buffer-size-8192",
|
||||
"assembler-max-segment-count-16",
|
||||
"reassembly-buffer-size-8192",
|
||||
"reassembly-buffer-count-16",
|
||||
"socket-tcp",
|
||||
"socket-tcp-cubic",
|
||||
"socket-udp",
|
||||
# "socket-tcp-cubic",
|
||||
"async",
|
||||
] }
|
||||
parking_lot = { version = "0.12.0", optional = true }
|
||||
@@ -176,9 +183,12 @@ sys-locale = "0.3"
|
||||
ringbuf = "0.4.5"
|
||||
async-ringbuf = "0.3.1"
|
||||
|
||||
service-manager = {git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main"}
|
||||
service-manager = { git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main" }
|
||||
|
||||
async-compression = { version = "0.4.17", default-features = false, features = ["zstd", "tokio"] }
|
||||
async-compression = { version = "0.4.17", default-features = false, features = [
|
||||
"zstd",
|
||||
"tokio",
|
||||
] }
|
||||
|
||||
kcp-sys = { git = "https://github.com/EasyTier/kcp-sys" }
|
||||
|
||||
@@ -187,12 +197,29 @@ prost-reflect = { version = "0.14.5", default-features = false, features = [
|
||||
] }
|
||||
|
||||
# for http connector
|
||||
http_req = { git = "https://github.com/EasyTier/http_req.git", default-features = false, features = ["rust-tls"] }
|
||||
http_req = { git = "https://github.com/EasyTier/http_req.git", default-features = false, features = [
|
||||
"rust-tls",
|
||||
] }
|
||||
|
||||
# for dns connector
|
||||
hickory-resolver = "0.24.4"
|
||||
hickory-resolver = "0.25.2"
|
||||
hickory-proto = "0.25.2"
|
||||
|
||||
bounded_join_set = "0.3.0"
|
||||
# for magic dns
|
||||
hickory-client = "0.25.2"
|
||||
hickory-server = { version = "0.25.2", features = ["resolver"] }
|
||||
derive_builder = "0.20.2"
|
||||
humantime-serde = "1.1.1"
|
||||
multimap = "0.10.0"
|
||||
version-compare = "0.2.0"
|
||||
|
||||
jemallocator = { version = "0.5.4", optional = true }
|
||||
jemalloc-ctl = { version = "0.5.4", optional = true }
|
||||
jemalloc-sys = { version = "0.5.4", features = [
|
||||
"stats",
|
||||
"profiling",
|
||||
"unprefixed_malloc_on_supported_platforms",
|
||||
], optional = true }
|
||||
|
||||
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
|
||||
machine-uid = "0.5.3"
|
||||
@@ -202,6 +229,10 @@ netlink-sys = "0.8.7"
|
||||
netlink-packet-route = "0.21.0"
|
||||
netlink-packet-core = { version = "0.7.0" }
|
||||
netlink-packet-utils = "0.5.2"
|
||||
# for magic dns
|
||||
resolv-conf = "0.7.3"
|
||||
dbus = { version = "0.9.7", features = ["vendored"] }
|
||||
which = "7.0.3"
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
windows = { version = "0.52.0", features = [
|
||||
@@ -212,7 +243,7 @@ windows = { version = "0.52.0", features = [
|
||||
"Win32_System_Ole",
|
||||
"Win32_Networking_WinSock",
|
||||
"Win32_System_IO",
|
||||
]}
|
||||
] }
|
||||
encoding = "0.2"
|
||||
winreg = "0.52"
|
||||
windows-service = "0.7.0"
|
||||
@@ -222,18 +253,28 @@ tonic-build = "0.12"
|
||||
globwalk = "0.8.1"
|
||||
regex = "1"
|
||||
prost-build = "0.13.2"
|
||||
rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = ["internal-namespace"] }
|
||||
rpc_build = { package = "easytier-rpc-build", version = "0.1.0", features = [
|
||||
"internal-namespace",
|
||||
] }
|
||||
prost-reflect-build = { version = "0.14.0" }
|
||||
|
||||
[target.'cfg(windows)'.build-dependencies]
|
||||
reqwest = { version = "0.12.12", features = ["blocking"] }
|
||||
zip = "0.6.6"
|
||||
|
||||
# enable thunk-rs when compiling for x86_64 or i686 windows
|
||||
[target.x86_64-pc-windows-msvc.build-dependencies]
|
||||
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
|
||||
|
||||
[target.i686-pc-windows-msvc.build-dependencies]
|
||||
thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = false, features = ["win7"] }
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "3.0.0"
|
||||
rstest = "0.18.2"
|
||||
futures-util = "0.3.30"
|
||||
maplit = "1.0.2"
|
||||
|
||||
[target.'cfg(target_os = "linux")'.dev-dependencies]
|
||||
defguard_wireguard_rs = "0.4.2"
|
||||
@@ -267,3 +308,4 @@ websocket = [
|
||||
]
|
||||
smoltcp = ["dep:smoltcp", "dep:parking_lot"]
|
||||
socks5 = ["dep:smoltcp"]
|
||||
jemalloc = ["dep:jemallocator", "dep:jemalloc-ctl", "dep:jemalloc-sys"]
|
||||
|
||||
@@ -71,6 +71,8 @@ impl WindowsBuild {
|
||||
|
||||
if target.contains("x86_64") {
|
||||
println!("cargo:rustc-link-search=native=easytier/third_party/");
|
||||
} else if target.contains("i686") {
|
||||
println!("cargo:rustc-link-search=native=easytier/third_party/i686/");
|
||||
} else if target.contains("aarch64") {
|
||||
println!("cargo:rustc-link-search=native=easytier/third_party/arm64/");
|
||||
}
|
||||
@@ -125,6 +127,15 @@ fn check_locale() {
|
||||
}
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// enable thunk-rs when target os is windows and arch is x86_64 or i686
|
||||
#[cfg(target_os = "windows")]
|
||||
if !std::env::var("TARGET")
|
||||
.unwrap_or_default()
|
||||
.contains("aarch64")
|
||||
{
|
||||
thunk::thunk();
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
WindowsBuild::check_for_win();
|
||||
|
||||
@@ -135,6 +146,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
"src/proto/tests.proto",
|
||||
"src/proto/cli.proto",
|
||||
"src/proto/web.proto",
|
||||
"src/proto/magic_dns.proto",
|
||||
];
|
||||
|
||||
for proto_file in proto_files.iter().chain(proto_files_reflect.iter()) {
|
||||
|
||||
@@ -11,8 +11,8 @@ core_clap:
|
||||
完整URL:--config-server udp://127.0.0.1:22020/admin
|
||||
仅用户名:--config-server admin,将使用官方的服务器
|
||||
config_file:
|
||||
en: "path to the config file, NOTE: if this is set, all other options will be ignored"
|
||||
zh-CN: "配置文件路径,注意:如果设置了这个选项,其他所有选项都将被忽略"
|
||||
en: "path to the config file, NOTE: the options set by cmdline args will override options in config file"
|
||||
zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项"
|
||||
network_name:
|
||||
en: "network name to identify this vpn network"
|
||||
zh-CN: "用于标识此VPN网络的网络名称"
|
||||
@@ -149,6 +149,12 @@ core_clap:
|
||||
disable_kcp_input:
|
||||
en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved."
|
||||
zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。"
|
||||
port_forward:
|
||||
en: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple."
|
||||
zh-CN: "将本地端口转发到虚拟网络中的远程端口。例如:udp://0.0.0.0:12345/10.126.126.1:23456,表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。"
|
||||
accept_dns:
|
||||
en: "if true, enable magic dns. with magic dns, you can access other nodes with a domain name, e.g.: <hostname>.et.net. magic dns will modify your system dns settings, enable it carefully."
|
||||
zh-CN: "如果为true,则启用魔法DNS。使用魔法DNS,您可以使用域名访问其他节点,例如:<hostname>.et.net。魔法DNS将修改您的系统DNS设置,请谨慎启用。"
|
||||
|
||||
core_app:
|
||||
panic_backtrace_save:
|
||||
|
||||
@@ -7,7 +7,10 @@ use std::{
|
||||
use anyhow::Context;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{proto::common::CompressionAlgoPb, tunnel::generate_digest_from_str};
|
||||
use crate::{
|
||||
proto::common::{CompressionAlgoPb, PortForwardConfigPb, SocketType},
|
||||
tunnel::generate_digest_from_str,
|
||||
};
|
||||
|
||||
pub type Flags = crate::proto::common::FlagsInConfig;
|
||||
|
||||
@@ -33,6 +36,7 @@ pub fn gen_default_flags() -> Flags {
|
||||
enable_kcp_proxy: false,
|
||||
disable_kcp_input: false,
|
||||
disable_relay_kcp: true,
|
||||
accept_dns: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,7 +77,7 @@ pub trait ConfigLoader: Send + Sync {
|
||||
fn get_peers(&self) -> Vec<PeerConfig>;
|
||||
fn set_peers(&self, peers: Vec<PeerConfig>);
|
||||
|
||||
fn get_listeners(&self) -> Vec<url::Url>;
|
||||
fn get_listeners(&self) -> Option<Vec<url::Url>>;
|
||||
fn set_listeners(&self, listeners: Vec<url::Url>);
|
||||
|
||||
fn get_mapped_listeners(&self) -> Vec<url::Url>;
|
||||
@@ -97,6 +101,9 @@ pub trait ConfigLoader: Send + Sync {
|
||||
fn get_socks5_portal(&self) -> Option<url::Url>;
|
||||
fn set_socks5_portal(&self, addr: Option<url::Url>);
|
||||
|
||||
fn get_port_forwards(&self) -> Vec<PortForwardConfig>;
|
||||
fn set_port_forwards(&self, forwards: Vec<PortForwardConfig>);
|
||||
|
||||
fn dump(&self) -> String;
|
||||
}
|
||||
|
||||
@@ -180,6 +187,41 @@ pub struct VpnPortalConfig {
|
||||
pub wireguard_listen: SocketAddr,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
|
||||
pub struct PortForwardConfig {
|
||||
pub bind_addr: SocketAddr,
|
||||
pub dst_addr: SocketAddr,
|
||||
pub proto: String,
|
||||
}
|
||||
|
||||
impl From<PortForwardConfigPb> for PortForwardConfig {
|
||||
fn from(config: PortForwardConfigPb) -> Self {
|
||||
PortForwardConfig {
|
||||
bind_addr: config.bind_addr.unwrap_or_default().into(),
|
||||
dst_addr: config.dst_addr.unwrap_or_default().into(),
|
||||
proto: match SocketType::try_from(config.socket_type) {
|
||||
Ok(SocketType::Tcp) => "tcp".to_string(),
|
||||
Ok(SocketType::Udp) => "udp".to_string(),
|
||||
_ => "tcp".to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Into<PortForwardConfigPb> for PortForwardConfig {
|
||||
fn into(self) -> PortForwardConfigPb {
|
||||
PortForwardConfigPb {
|
||||
bind_addr: Some(self.bind_addr.into()),
|
||||
dst_addr: Some(self.dst_addr.into()),
|
||||
socket_type: match self.proto.to_lowercase().as_str() {
|
||||
"tcp" => SocketType::Tcp as i32,
|
||||
"udp" => SocketType::Udp as i32,
|
||||
_ => SocketType::Tcp as i32,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
|
||||
struct Config {
|
||||
netns: Option<String>,
|
||||
@@ -207,6 +249,8 @@ struct Config {
|
||||
|
||||
socks5_proxy: Option<url::Url>,
|
||||
|
||||
port_forward: Option<Vec<PortForwardConfig>>,
|
||||
|
||||
flags: Option<serde_json::Map<String, serde_json::Value>>,
|
||||
|
||||
#[serde(skip)]
|
||||
@@ -231,20 +275,23 @@ impl TomlConfigLoader {
|
||||
|
||||
config.flags_struct = Some(Self::gen_flags(config.flags.clone().unwrap_or_default()));
|
||||
|
||||
Ok(TomlConfigLoader {
|
||||
let config = TomlConfigLoader {
|
||||
config: Arc::new(Mutex::new(config)),
|
||||
})
|
||||
};
|
||||
|
||||
let old_ns = config.get_network_identity();
|
||||
config.set_network_identity(NetworkIdentity::new(
|
||||
old_ns.network_name,
|
||||
old_ns.network_secret.unwrap_or_default(),
|
||||
));
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn new(config_path: &PathBuf) -> Result<Self, anyhow::Error> {
|
||||
let config_str = std::fs::read_to_string(config_path)
|
||||
.with_context(|| format!("failed to read config file: {:?}", config_path))?;
|
||||
let ret = Self::new_from_str(&config_str)?;
|
||||
let old_ns = ret.get_network_identity();
|
||||
ret.set_network_identity(NetworkIdentity::new(
|
||||
old_ns.network_name,
|
||||
old_ns.network_secret.unwrap_or_default(),
|
||||
));
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
@@ -467,13 +514,8 @@ impl ConfigLoader for TomlConfigLoader {
|
||||
self.config.lock().unwrap().peer = Some(peers);
|
||||
}
|
||||
|
||||
fn get_listeners(&self) -> Vec<url::Url> {
|
||||
self.config
|
||||
.lock()
|
||||
.unwrap()
|
||||
.listeners
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
fn get_listeners(&self) -> Option<Vec<url::Url>> {
|
||||
self.config.lock().unwrap().listeners.clone()
|
||||
}
|
||||
|
||||
fn set_listeners(&self, listeners: Vec<url::Url>) {
|
||||
@@ -534,6 +576,35 @@ impl ConfigLoader for TomlConfigLoader {
|
||||
self.config.lock().unwrap().exit_nodes = Some(nodes);
|
||||
}
|
||||
|
||||
fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>> {
|
||||
self.config.lock().unwrap().routes.clone()
|
||||
}
|
||||
|
||||
fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>) {
|
||||
self.config.lock().unwrap().routes = routes;
|
||||
}
|
||||
|
||||
fn get_socks5_portal(&self) -> Option<url::Url> {
|
||||
self.config.lock().unwrap().socks5_proxy.clone()
|
||||
}
|
||||
|
||||
fn set_socks5_portal(&self, addr: Option<url::Url>) {
|
||||
self.config.lock().unwrap().socks5_proxy = addr;
|
||||
}
|
||||
|
||||
fn get_port_forwards(&self) -> Vec<PortForwardConfig> {
|
||||
self.config
|
||||
.lock()
|
||||
.unwrap()
|
||||
.port_forward
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn set_port_forwards(&self, forwards: Vec<PortForwardConfig>) {
|
||||
self.config.lock().unwrap().port_forward = Some(forwards);
|
||||
}
|
||||
|
||||
fn dump(&self) -> String {
|
||||
let default_flags_json = serde_json::to_string(&gen_default_flags()).unwrap();
|
||||
let default_flags_hashmap =
|
||||
@@ -558,22 +629,6 @@ impl ConfigLoader for TomlConfigLoader {
|
||||
config.flags = Some(flag_map);
|
||||
toml::to_string_pretty(&config).unwrap()
|
||||
}
|
||||
|
||||
fn get_routes(&self) -> Option<Vec<cidr::Ipv4Cidr>> {
|
||||
self.config.lock().unwrap().routes.clone()
|
||||
}
|
||||
|
||||
fn set_routes(&self, routes: Option<Vec<cidr::Ipv4Cidr>>) {
|
||||
self.config.lock().unwrap().routes = routes;
|
||||
}
|
||||
|
||||
fn get_socks5_portal(&self) -> Option<url::Url> {
|
||||
self.config.lock().unwrap().socks5_proxy.clone()
|
||||
}
|
||||
|
||||
fn set_socks5_portal(&self, addr: Option<url::Url>) {
|
||||
self.config.lock().unwrap().socks5_proxy = addr;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -614,6 +669,11 @@ dir = "/tmp/easytier"
|
||||
|
||||
[console_logger]
|
||||
level = "warn"
|
||||
|
||||
[[port_forward]]
|
||||
bind_addr = "0.0.0.0:11011"
|
||||
dst_addr = "192.168.94.33:11011"
|
||||
proto = "tcp"
|
||||
"#;
|
||||
let ret = TomlConfigLoader::new_from_str(config_str);
|
||||
if let Err(e) = &ret {
|
||||
@@ -634,6 +694,14 @@ level = "warn"
|
||||
.collect::<Vec<String>>()
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
vec![PortForwardConfig {
|
||||
bind_addr: "0.0.0.0:11011".parse().unwrap(),
|
||||
dst_addr: "192.168.94.33:11011".parse().unwrap(),
|
||||
proto: "tcp".to_string(),
|
||||
}],
|
||||
ret.get_port_forwards()
|
||||
);
|
||||
println!("{}", ret.dump());
|
||||
}
|
||||
}
|
||||
|
||||
134
easytier/src/common/dns.rs
Normal file
134
easytier/src/common/dns.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use hickory_proto::runtime::TokioRuntimeProvider;
|
||||
use hickory_proto::xfer::Protocol;
|
||||
use hickory_resolver::config::{LookupIpStrategy, NameServerConfig, ResolverConfig, ResolverOpts};
|
||||
use hickory_resolver::name_server::{GenericConnector, TokioConnectionProvider};
|
||||
use hickory_resolver::system_conf::read_system_conf;
|
||||
use hickory_resolver::{Resolver, TokioResolver};
|
||||
use once_cell::sync::Lazy;
|
||||
use tokio::net::lookup_host;
|
||||
|
||||
use super::error::Error;
|
||||
|
||||
pub fn get_default_resolver_config() -> ResolverConfig {
|
||||
let mut default_resolve_config = ResolverConfig::new();
|
||||
default_resolve_config.add_name_server(NameServerConfig::new(
|
||||
"223.5.5.5:53".parse().unwrap(),
|
||||
Protocol::Udp,
|
||||
));
|
||||
default_resolve_config.add_name_server(NameServerConfig::new(
|
||||
"180.184.1.1:53".parse().unwrap(),
|
||||
Protocol::Udp,
|
||||
));
|
||||
default_resolve_config
|
||||
}
|
||||
|
||||
pub static ALLOW_USE_SYSTEM_DNS_RESOLVER: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(true));
|
||||
|
||||
pub static RESOLVER: Lazy<Arc<Resolver<GenericConnector<TokioRuntimeProvider>>>> =
|
||||
Lazy::new(|| {
|
||||
let system_cfg = read_system_conf();
|
||||
let mut cfg = get_default_resolver_config();
|
||||
let mut opt = ResolverOpts::default();
|
||||
if let Ok(s) = system_cfg {
|
||||
for ns in s.0.name_servers() {
|
||||
cfg.add_name_server(ns.clone());
|
||||
}
|
||||
opt = s.1;
|
||||
}
|
||||
opt.ip_strategy = LookupIpStrategy::Ipv4AndIpv6;
|
||||
let builder = TokioResolver::builder_with_config(cfg, TokioConnectionProvider::default())
|
||||
.with_options(opt);
|
||||
Arc::new(builder.build())
|
||||
});
|
||||
|
||||
pub async fn resolve_txt_record(domain_name: &str) -> Result<String, Error> {
|
||||
let r = RESOLVER.clone();
|
||||
let response = r.txt_lookup(domain_name).await.with_context(|| {
|
||||
format!(
|
||||
"txt_lookup failed, domain_name: {}",
|
||||
domain_name.to_string()
|
||||
)
|
||||
})?;
|
||||
|
||||
let txt_record = response.iter().next().with_context(|| {
|
||||
format!(
|
||||
"no txt record found, domain_name: {}",
|
||||
domain_name.to_string()
|
||||
)
|
||||
})?;
|
||||
|
||||
let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
|
||||
tracing::info!(?txt_data, ?domain_name, "get txt record");
|
||||
|
||||
Ok(txt_data.to_string())
|
||||
}
|
||||
|
||||
pub async fn socket_addrs(
|
||||
url: &url::Url,
|
||||
default_port_number: impl Fn() -> Option<u16>,
|
||||
) -> Result<Vec<SocketAddr>, Error> {
|
||||
let host = url.host_str().ok_or(Error::InvalidUrl(url.to_string()))?;
|
||||
let port = url
|
||||
.port()
|
||||
.or_else(default_port_number)
|
||||
.ok_or(Error::InvalidUrl(url.to_string()))?;
|
||||
|
||||
// if host is an ip address, return it directly
|
||||
if let Ok(ip) = host.parse::<std::net::IpAddr>() {
|
||||
return Ok(vec![SocketAddr::new(ip, port)]);
|
||||
}
|
||||
|
||||
if ALLOW_USE_SYSTEM_DNS_RESOLVER.load(std::sync::atomic::Ordering::Relaxed) {
|
||||
let socket_addr = format!("{}:{}", host, port);
|
||||
match lookup_host(socket_addr).await {
|
||||
Ok(a) => {
|
||||
let a = a.collect();
|
||||
tracing::debug!(?a, "system dns lookup done");
|
||||
return Ok(a);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "system dns lookup failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use hickory_resolver
|
||||
let ret = RESOLVER.lookup_ip(host).await.with_context(|| {
|
||||
format!(
|
||||
"hickory dns lookup_ip failed, host: {}, port: {}",
|
||||
host, port
|
||||
)
|
||||
})?;
|
||||
Ok(ret
|
||||
.iter()
|
||||
.map(|ip| SocketAddr::new(ip, port))
|
||||
.collect::<Vec<_>>())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::defer;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_socket_addrs() {
|
||||
let url = url::Url::parse("tcp://public.easytier.cn:80").unwrap();
|
||||
let addrs = socket_addrs(&url, || Some(80)).await.unwrap();
|
||||
assert_eq!(2, addrs.len(), "addrs: {:?}", addrs);
|
||||
println!("addrs: {:?}", addrs);
|
||||
|
||||
ALLOW_USE_SYSTEM_DNS_RESOLVER.store(false, std::sync::atomic::Ordering::Relaxed);
|
||||
defer!(
|
||||
ALLOW_USE_SYSTEM_DNS_RESOLVER.store(true, std::sync::atomic::Ordering::Relaxed);
|
||||
);
|
||||
let addrs = socket_addrs(&url, || Some(80)).await.unwrap();
|
||||
assert_eq!(2, addrs.len(), "addrs: {:?}", addrs);
|
||||
println!("addrs2: {:?}", addrs);
|
||||
}
|
||||
}
|
||||
@@ -5,7 +5,7 @@ use std::{
|
||||
};
|
||||
|
||||
use crate::proto::cli::PeerConnInfo;
|
||||
use crate::proto::common::PeerFeatureFlag;
|
||||
use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb};
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
|
||||
use super::{
|
||||
@@ -42,6 +42,8 @@ pub enum GlobalCtxEvent {
|
||||
|
||||
DhcpIpv4Changed(Option<cidr::Ipv4Inet>, Option<cidr::Ipv4Inet>), // (old, new)
|
||||
DhcpIpv4Conflicted(Option<cidr::Ipv4Inet>),
|
||||
|
||||
PortForwardAdded(PortForwardConfigPb),
|
||||
}
|
||||
|
||||
pub type EventBus = tokio::sync::broadcast::Sender<GlobalCtxEvent>;
|
||||
@@ -59,11 +61,11 @@ pub struct GlobalCtx {
|
||||
cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>,
|
||||
cached_proxy_cidrs: AtomicCell<Option<Vec<cidr::IpCidr>>>,
|
||||
|
||||
ip_collector: Arc<IPCollector>,
|
||||
ip_collector: Mutex<Option<Arc<IPCollector>>>,
|
||||
|
||||
hostname: String,
|
||||
hostname: Mutex<String>,
|
||||
|
||||
stun_info_collection: Box<dyn StunInfoCollectorTrait>,
|
||||
stun_info_collection: Mutex<Arc<dyn StunInfoCollectorTrait>>,
|
||||
|
||||
running_listeners: Mutex<Vec<url::Url>>,
|
||||
|
||||
@@ -95,7 +97,7 @@ impl GlobalCtx {
|
||||
let net_ns = NetNS::new(config_fs.get_netns());
|
||||
let hostname = config_fs.get_hostname();
|
||||
|
||||
let (event_bus, _) = tokio::sync::broadcast::channel(1024);
|
||||
let (event_bus, _) = tokio::sync::broadcast::channel(8);
|
||||
|
||||
let stun_info_collection = Arc::new(StunInfoCollector::new_with_default_servers());
|
||||
|
||||
@@ -118,11 +120,14 @@ impl GlobalCtx {
|
||||
cached_ipv4: AtomicCell::new(None),
|
||||
cached_proxy_cidrs: AtomicCell::new(None),
|
||||
|
||||
ip_collector: Arc::new(IPCollector::new(net_ns, stun_info_collection.clone())),
|
||||
ip_collector: Mutex::new(Some(Arc::new(IPCollector::new(
|
||||
net_ns,
|
||||
stun_info_collection.clone(),
|
||||
)))),
|
||||
|
||||
hostname,
|
||||
hostname: Mutex::new(hostname),
|
||||
|
||||
stun_info_collection: Box::new(stun_info_collection),
|
||||
stun_info_collection: Mutex::new(stun_info_collection),
|
||||
|
||||
running_listeners: Mutex::new(Vec::new()),
|
||||
|
||||
@@ -139,10 +144,13 @@ impl GlobalCtx {
|
||||
}
|
||||
|
||||
pub fn issue_event(&self, event: GlobalCtxEvent) {
|
||||
if self.event_bus.receiver_count() != 0 {
|
||||
self.event_bus.send(event).unwrap();
|
||||
} else {
|
||||
tracing::warn!("No subscriber for event: {:?}", event);
|
||||
if let Err(e) = self.event_bus.send(event.clone()) {
|
||||
tracing::warn!(
|
||||
"Failed to send event: {:?}, error: {:?}, receiver count: {}",
|
||||
event,
|
||||
e,
|
||||
self.event_bus.receiver_count()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -210,26 +218,30 @@ impl GlobalCtx {
|
||||
}
|
||||
|
||||
pub fn get_ip_collector(&self) -> Arc<IPCollector> {
|
||||
self.ip_collector.clone()
|
||||
self.ip_collector.lock().unwrap().as_ref().unwrap().clone()
|
||||
}
|
||||
|
||||
pub fn get_hostname(&self) -> String {
|
||||
return self.hostname.clone();
|
||||
return self.hostname.lock().unwrap().clone();
|
||||
}
|
||||
|
||||
pub fn get_stun_info_collector(&self) -> impl StunInfoCollectorTrait + '_ {
|
||||
self.stun_info_collection.as_ref()
|
||||
pub fn set_hostname(&self, hostname: String) {
|
||||
*self.hostname.lock().unwrap() = hostname;
|
||||
}
|
||||
|
||||
pub fn get_stun_info_collector(&self) -> Arc<dyn StunInfoCollectorTrait> {
|
||||
self.stun_info_collection.lock().unwrap().clone()
|
||||
}
|
||||
|
||||
pub fn replace_stun_info_collector(&self, collector: Box<dyn StunInfoCollectorTrait>) {
|
||||
// force replace the stun_info_collection without mut and drop the old one
|
||||
let ptr = &self.stun_info_collection as *const Box<dyn StunInfoCollectorTrait>;
|
||||
let ptr = ptr as *mut Box<dyn StunInfoCollectorTrait>;
|
||||
unsafe {
|
||||
std::ptr::drop_in_place(ptr);
|
||||
#[allow(invalid_reference_casting)]
|
||||
std::ptr::write(ptr, collector);
|
||||
}
|
||||
let arc_collector: Arc<dyn StunInfoCollectorTrait> = Arc::new(collector);
|
||||
*self.stun_info_collection.lock().unwrap() = arc_collector.clone();
|
||||
|
||||
// rebuild the ip collector
|
||||
*self.ip_collector.lock().unwrap() = Some(Arc::new(IPCollector::new(
|
||||
self.net_ns.clone(),
|
||||
arc_collector,
|
||||
)));
|
||||
}
|
||||
|
||||
pub fn get_running_listeners(&self) -> Vec<url::Url> {
|
||||
@@ -295,7 +307,10 @@ impl GlobalCtx {
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use crate::common::{config::TomlConfigLoader, new_peer_id};
|
||||
use crate::{
|
||||
common::{config::TomlConfigLoader, new_peer_id, stun::MockStunInfoCollector},
|
||||
proto::common::NatType,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -335,7 +350,12 @@ pub mod tests {
|
||||
let config_fs = TomlConfigLoader::default();
|
||||
config_fs.set_inst_name(format!("test_{}", config_fs.get_id()));
|
||||
config_fs.set_network_identity(network_identy.unwrap_or(NetworkIdentity::default()));
|
||||
std::sync::Arc::new(GlobalCtx::new(config_fs))
|
||||
|
||||
let ctx = Arc::new(GlobalCtx::new(config_fs));
|
||||
ctx.replace_stun_info_collector(Box::new(MockStunInfoCollector {
|
||||
udp_nat_type: NatType::Unknown,
|
||||
}));
|
||||
ctx
|
||||
}
|
||||
|
||||
pub fn get_mock_global_ctx() -> ArcGlobalCtx {
|
||||
|
||||
@@ -12,13 +12,15 @@ impl IfConfiguerTrait for MacIfConfiger {
|
||||
name: &str,
|
||||
address: Ipv4Addr,
|
||||
cidr_prefix: u8,
|
||||
cost: Option<i32>,
|
||||
) -> Result<(), Error> {
|
||||
run_shell_cmd(
|
||||
format!(
|
||||
"route -n add {} -netmask {} -interface {} -hopcount 7",
|
||||
"route -n add {} -netmask {} -interface {} -hopcount {}",
|
||||
address,
|
||||
cidr_to_subnet_mask(cidr_prefix),
|
||||
name
|
||||
name,
|
||||
cost.unwrap_or(7)
|
||||
)
|
||||
.as_str(),
|
||||
)
|
||||
|
||||
@@ -21,6 +21,7 @@ pub trait IfConfiguerTrait: Send + Sync {
|
||||
_name: &str,
|
||||
_address: Ipv4Addr,
|
||||
_cidr_prefix: u8,
|
||||
_cost: Option<i32>,
|
||||
) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
@@ -125,3 +126,6 @@ pub type IfConfiger = windows::WindowsIfConfiger;
|
||||
target_os = "freebsd",
|
||||
)))]
|
||||
pub type IfConfiger = DummyIfConfiger;
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
pub use windows::RegistryManager;
|
||||
|
||||
@@ -350,6 +350,7 @@ impl IfConfiguerTrait for NetlinkIfConfiger {
|
||||
name: &str,
|
||||
address: Ipv4Addr,
|
||||
cidr_prefix: u8,
|
||||
cost: Option<i32>,
|
||||
) -> Result<(), Error> {
|
||||
let mut message = RouteMessage::default();
|
||||
|
||||
@@ -359,7 +360,9 @@ impl IfConfiguerTrait for NetlinkIfConfiger {
|
||||
message.header.kind = RouteType::Unicast;
|
||||
message.header.address_family = AddressFamily::Inet;
|
||||
// metric
|
||||
message.attributes.push(RouteAttribute::Priority(65535));
|
||||
message
|
||||
.attributes
|
||||
.push(RouteAttribute::Priority(cost.unwrap_or(65535) as u32));
|
||||
// output interface
|
||||
message
|
||||
.attributes
|
||||
@@ -550,7 +553,7 @@ mod tests {
|
||||
ifcfg.set_link_status(DUMMY_IFACE_NAME, true).await.unwrap();
|
||||
|
||||
ifcfg
|
||||
.add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24)
|
||||
.add_ipv4_route(DUMMY_IFACE_NAME, "10.5.5.0".parse().unwrap(), 24, None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::net::Ipv4Addr;
|
||||
use std::{io, net::Ipv4Addr};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use winreg::{
|
||||
enums::{HKEY_LOCAL_MACHINE, KEY_READ, KEY_WRITE},
|
||||
RegKey,
|
||||
};
|
||||
|
||||
use super::{cidr_to_subnet_mask, run_shell_cmd, Error, IfConfiguerTrait};
|
||||
|
||||
@@ -59,16 +63,18 @@ impl IfConfiguerTrait for WindowsIfConfiger {
|
||||
name: &str,
|
||||
address: Ipv4Addr,
|
||||
cidr_prefix: u8,
|
||||
cost: Option<i32>,
|
||||
) -> Result<(), Error> {
|
||||
let Some(idx) = Self::get_interface_index(name) else {
|
||||
return Err(Error::NotFound);
|
||||
};
|
||||
run_shell_cmd(
|
||||
format!(
|
||||
"route ADD {} MASK {} 10.1.1.1 IF {} METRIC 9000",
|
||||
"route ADD {} MASK {} 10.1.1.1 IF {} METRIC {}",
|
||||
address,
|
||||
cidr_to_subnet_mask(cidr_prefix),
|
||||
idx
|
||||
idx,
|
||||
cost.unwrap_or(9000)
|
||||
)
|
||||
.as_str(),
|
||||
)
|
||||
@@ -164,3 +170,220 @@ impl IfConfiguerTrait for WindowsIfConfiger {
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RegistryManager;
|
||||
|
||||
impl RegistryManager {
|
||||
pub const IPV4_TCPIP_INTERFACE_PREFIX: &str =
|
||||
r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\";
|
||||
pub const IPV6_TCPIP_INTERFACE_PREFIX: &str =
|
||||
r"SYSTEM\CurrentControlSet\Services\Tcpip6\Parameters\Interfaces\";
|
||||
pub const NETBT_INTERFACE_PREFIX: &str =
|
||||
r"SYSTEM\CurrentControlSet\Services\NetBT\Parameters\Interfaces\Tcpip_";
|
||||
|
||||
pub fn reg_delete_obsoleted_items(dev_name: &str) -> io::Result<()> {
|
||||
use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey};
|
||||
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
|
||||
let profiles_key = hklm.open_subkey_with_flags(
|
||||
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles",
|
||||
KEY_ALL_ACCESS,
|
||||
)?;
|
||||
let unmanaged_key = hklm.open_subkey_with_flags(
|
||||
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Signatures\\Unmanaged",
|
||||
KEY_ALL_ACCESS,
|
||||
)?;
|
||||
// collect subkeys to delete
|
||||
let mut keys_to_delete = Vec::new();
|
||||
let mut keys_to_delete_unmanaged = Vec::new();
|
||||
for subkey_name in profiles_key.enum_keys().filter_map(Result::ok) {
|
||||
let subkey = profiles_key.open_subkey(&subkey_name)?;
|
||||
// check if ProfileName contains "et"
|
||||
match subkey.get_value::<String, _>("ProfileName") {
|
||||
Ok(profile_name) => {
|
||||
if profile_name.contains("et_")
|
||||
|| (!dev_name.is_empty() && dev_name == profile_name)
|
||||
{
|
||||
keys_to_delete.push(subkey_name);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
"Failed to read ProfileName for subkey {}: {}",
|
||||
subkey_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
for subkey_name in unmanaged_key.enum_keys().filter_map(Result::ok) {
|
||||
let subkey = unmanaged_key.open_subkey(&subkey_name)?;
|
||||
// check if ProfileName contains "et"
|
||||
match subkey.get_value::<String, _>("Description") {
|
||||
Ok(profile_name) => {
|
||||
if profile_name.contains("et_")
|
||||
|| (!dev_name.is_empty() && dev_name == profile_name)
|
||||
{
|
||||
keys_to_delete_unmanaged.push(subkey_name);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
"Failed to read ProfileName for subkey {}: {}",
|
||||
subkey_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
// delete collected subkeys
|
||||
if !keys_to_delete.is_empty() {
|
||||
for subkey_name in keys_to_delete {
|
||||
match profiles_key.delete_subkey_all(&subkey_name) {
|
||||
Ok(_) => tracing::trace!("Successfully deleted subkey: {}", subkey_name),
|
||||
Err(e) => tracing::error!("Failed to delete subkey {}: {}", subkey_name, e),
|
||||
}
|
||||
}
|
||||
}
|
||||
if !keys_to_delete_unmanaged.is_empty() {
|
||||
for subkey_name in keys_to_delete_unmanaged {
|
||||
match unmanaged_key.delete_subkey_all(&subkey_name) {
|
||||
Ok(_) => tracing::trace!("Successfully deleted subkey: {}", subkey_name),
|
||||
Err(e) => tracing::error!("Failed to delete subkey {}: {}", subkey_name, e),
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn reg_change_catrgory_in_profile(dev_name: &str) -> io::Result<()> {
|
||||
use winreg::{enums::HKEY_LOCAL_MACHINE, enums::KEY_ALL_ACCESS, RegKey};
|
||||
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
|
||||
let profiles_key = hklm.open_subkey_with_flags(
|
||||
"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\NetworkList\\Profiles",
|
||||
KEY_ALL_ACCESS,
|
||||
)?;
|
||||
|
||||
for subkey_name in profiles_key.enum_keys().filter_map(Result::ok) {
|
||||
let subkey = profiles_key.open_subkey_with_flags(&subkey_name, KEY_ALL_ACCESS)?;
|
||||
match subkey.get_value::<String, _>("ProfileName") {
|
||||
Ok(profile_name) => {
|
||||
if !dev_name.is_empty() && dev_name == profile_name {
|
||||
match subkey.set_value("Category", &1u32) {
|
||||
Ok(_) => tracing::trace!("Successfully set Category in registry"),
|
||||
Err(e) => tracing::error!("Failed to set Category in registry: {}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
"Failed to read ProfileName for subkey {}: {}",
|
||||
subkey_name,
|
||||
e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// 根据接口名称查找 GUID
|
||||
pub fn find_interface_guid(interface_name: &str) -> io::Result<String> {
|
||||
// 注册表路径:所有网络接口的根目录
|
||||
let network_key_path =
|
||||
r"SYSTEM\CurrentControlSet\Control\Network\{4D36E972-E325-11CE-BFC1-08002BE10318}";
|
||||
|
||||
let hklm = RegKey::predef(HKEY_LOCAL_MACHINE);
|
||||
let network_key = hklm.open_subkey_with_flags(network_key_path, KEY_READ)?;
|
||||
|
||||
// 遍历该路径下的所有 GUID 子键
|
||||
for guid in network_key.enum_keys().map_while(Result::ok) {
|
||||
if let Ok(guid_key) = network_key.open_subkey_with_flags(&guid, KEY_READ) {
|
||||
// 检查 Connection/Name 是否匹配目标接口名
|
||||
if let Ok(conn_key) = guid_key.open_subkey_with_flags("Connection", KEY_READ) {
|
||||
if let Ok(name) = conn_key.get_value::<String, _>("Name") {
|
||||
if name == interface_name {
|
||||
return Ok(guid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 如果没有找到对应的接口
|
||||
Err(io::Error::new(
|
||||
io::ErrorKind::NotFound,
|
||||
"Interface not found",
|
||||
))
|
||||
}
|
||||
|
||||
// 打开注册表键
|
||||
pub fn open_interface_key(interface_guid: &str, prefix: &str) -> io::Result<RegKey> {
|
||||
let path = format!(r"{}{}", prefix, interface_guid);
|
||||
let hkey_local_machine = RegKey::predef(HKEY_LOCAL_MACHINE);
|
||||
hkey_local_machine.open_subkey_with_flags(&path, KEY_WRITE)
|
||||
}
|
||||
|
||||
// 禁用动态 DNS 更新
|
||||
// disableDynamicUpdates sets the appropriate registry values to prevent the
|
||||
// Windows DHCP client from sending dynamic DNS updates for our interface to
|
||||
// AD domain controllers.
|
||||
pub fn disable_dynamic_updates(interface_guid: &str) -> io::Result<()> {
|
||||
let prefixes = [
|
||||
Self::IPV4_TCPIP_INTERFACE_PREFIX,
|
||||
Self::IPV6_TCPIP_INTERFACE_PREFIX,
|
||||
];
|
||||
|
||||
for prefix in &prefixes {
|
||||
let key = match Self::open_interface_key(interface_guid, prefix) {
|
||||
Ok(k) => k,
|
||||
Err(e) => {
|
||||
// 模拟 mute-key-not-found-if-closing 行为
|
||||
if matches!(e.kind(), io::ErrorKind::NotFound) {
|
||||
continue;
|
||||
} else {
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
key.set_value("RegistrationEnabled", &0u32)?;
|
||||
key.set_value("DisableDynamicUpdate", &1u32)?;
|
||||
key.set_value("MaxNumberOfAddressesToRegister", &0u32)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// 设置单个 DWORD 值到指定的注册表路径下
|
||||
fn set_single_dword(
|
||||
interface_guid: &str,
|
||||
prefix: &str,
|
||||
value_name: &str,
|
||||
data: u32,
|
||||
) -> io::Result<()> {
|
||||
let key = match Self::open_interface_key(interface_guid, prefix) {
|
||||
Ok(k) => k,
|
||||
Err(e) => {
|
||||
// 模拟 muteKeyNotFoundIfClosing 行为:忽略 Key Not Found 错误
|
||||
return if matches!(e.kind(), io::ErrorKind::NotFound) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(e)
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
key.set_value(value_name, &data)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// 禁用 NetBIOS 名称解析请求
|
||||
pub fn disable_netbios(interface_guid: &str) -> io::Result<()> {
|
||||
Self::set_single_dword(
|
||||
interface_guid,
|
||||
Self::NETBT_INTERFACE_PREFIX,
|
||||
"NetbiosOptions",
|
||||
2,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ pub mod compressor;
|
||||
pub mod config;
|
||||
pub mod constants;
|
||||
pub mod defer;
|
||||
pub mod dns;
|
||||
pub mod error;
|
||||
pub mod global_ctx;
|
||||
pub mod ifcfg;
|
||||
|
||||
@@ -179,18 +179,16 @@ impl IPCollector {
|
||||
Self::do_collect_local_ip_addrs(self.net_ns.clone()).await;
|
||||
let net_ns = self.net_ns.clone();
|
||||
let stun_info_collector = self.stun_info_collector.clone();
|
||||
task.spawn(async move {
|
||||
loop {
|
||||
let ip_addrs = Self::do_collect_local_ip_addrs(net_ns.clone()).await;
|
||||
*cached_ip_list.write().await = ip_addrs;
|
||||
tokio::time::sleep(std::time::Duration::from_secs(CACHED_IP_LIST_TIMEOUT_SEC))
|
||||
.await;
|
||||
}
|
||||
});
|
||||
|
||||
let cached_ip_list = self.cached_ip_list.clone();
|
||||
task.spawn(async move {
|
||||
let mut last_fetch_iface_time = std::time::Instant::now();
|
||||
loop {
|
||||
if last_fetch_iface_time.elapsed().as_secs() > CACHED_IP_LIST_TIMEOUT_SEC {
|
||||
let ifaces = Self::do_collect_local_ip_addrs(net_ns.clone()).await;
|
||||
*cached_ip_list.write().await = ifaces;
|
||||
last_fetch_iface_time = std::time::Instant::now();
|
||||
}
|
||||
|
||||
let stun_info = stun_info_collector.get_stun_info();
|
||||
for ip in stun_info.public_ip.iter() {
|
||||
let Ok(ip_addr) = ip.parse::<IpAddr>() else {
|
||||
@@ -199,14 +197,20 @@ impl IPCollector {
|
||||
|
||||
match ip_addr {
|
||||
IpAddr::V4(v) => {
|
||||
cached_ip_list.write().await.public_ipv4 = Some(v.into())
|
||||
cached_ip_list.write().await.public_ipv4.replace(v.into());
|
||||
}
|
||||
IpAddr::V6(v) => {
|
||||
cached_ip_list.write().await.public_ipv6 = Some(v.into())
|
||||
cached_ip_list.write().await.public_ipv6.replace(v.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tracing::debug!(
|
||||
"got public ip: {:?}, {:?}",
|
||||
cached_ip_list.read().await.public_ipv4,
|
||||
cached_ip_list.read().await.public_ipv6
|
||||
);
|
||||
|
||||
let sleep_sec = if !cached_ip_list.read().await.public_ipv4.is_none() {
|
||||
CACHED_IP_LIST_TIMEOUT_SEC
|
||||
} else {
|
||||
@@ -217,10 +221,10 @@ impl IPCollector {
|
||||
});
|
||||
}
|
||||
|
||||
return self.cached_ip_list.read().await.deref().clone();
|
||||
self.cached_ip_list.read().await.deref().clone()
|
||||
}
|
||||
|
||||
pub async fn collect_interfaces(net_ns: NetNS) -> Vec<NetworkInterface> {
|
||||
pub async fn collect_interfaces(net_ns: NetNS, filter: bool) -> Vec<NetworkInterface> {
|
||||
let _g = net_ns.guard();
|
||||
let ifaces = pnet::datalink::interfaces();
|
||||
let mut ret = vec![];
|
||||
@@ -229,7 +233,7 @@ impl IPCollector {
|
||||
iface: iface.clone(),
|
||||
};
|
||||
|
||||
if !f.filter_iface().await {
|
||||
if filter && !f.filter_iface().await {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -243,21 +247,36 @@ impl IPCollector {
|
||||
async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse {
|
||||
let mut ret = GetIpListResponse::default();
|
||||
|
||||
let ifaces = Self::collect_interfaces(net_ns.clone()).await;
|
||||
let ifaces = Self::collect_interfaces(net_ns.clone(), true).await;
|
||||
let _g = net_ns.guard();
|
||||
for iface in ifaces {
|
||||
for ip in iface.ips {
|
||||
let ip: std::net::IpAddr = ip.ip();
|
||||
if ip.is_loopback() || ip.is_multicast() {
|
||||
continue;
|
||||
}
|
||||
match ip {
|
||||
std::net::IpAddr::V4(v4) => {
|
||||
if ip.is_loopback() || ip.is_multicast() {
|
||||
continue;
|
||||
}
|
||||
ret.interface_ipv4s.push(v4.into());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let ifaces = Self::collect_interfaces(net_ns.clone(), false).await;
|
||||
let _g = net_ns.guard();
|
||||
for iface in ifaces {
|
||||
for ip in iface.ips {
|
||||
let ip: std::net::IpAddr = ip.ip();
|
||||
match ip {
|
||||
std::net::IpAddr::V6(v6) => {
|
||||
if v6.is_multicast() || v6.is_loopback() || v6.is_unicast_link_local() {
|
||||
continue;
|
||||
}
|
||||
ret.interface_ipv6s.push(v6.into());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,8 +8,6 @@ use crate::proto::common::{NatType, StunInfo};
|
||||
use anyhow::Context;
|
||||
use chrono::Local;
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use hickory_resolver::config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts};
|
||||
use hickory_resolver::TokioAsyncResolver;
|
||||
use rand::seq::IteratorRandom;
|
||||
use tokio::net::{lookup_host, UdpSocket};
|
||||
use tokio::sync::{broadcast, Mutex};
|
||||
@@ -22,45 +20,9 @@ use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder};
|
||||
|
||||
use crate::common::error::Error;
|
||||
|
||||
use super::dns::resolve_txt_record;
|
||||
use super::stun_codec_ext::*;
|
||||
|
||||
pub fn get_default_resolver_config() -> ResolverConfig {
|
||||
let mut default_resolve_config = ResolverConfig::new();
|
||||
default_resolve_config.add_name_server(NameServerConfig::new(
|
||||
"223.5.5.5:53".parse().unwrap(),
|
||||
Protocol::Udp,
|
||||
));
|
||||
default_resolve_config.add_name_server(NameServerConfig::new(
|
||||
"180.184.1.1:53".parse().unwrap(),
|
||||
Protocol::Udp,
|
||||
));
|
||||
default_resolve_config
|
||||
}
|
||||
|
||||
pub async fn resolve_txt_record(
|
||||
domain_name: &str,
|
||||
resolver: &TokioAsyncResolver,
|
||||
) -> Result<String, Error> {
|
||||
let response = resolver.txt_lookup(domain_name).await.with_context(|| {
|
||||
format!(
|
||||
"txt_lookup failed, domain_name: {}",
|
||||
domain_name.to_string()
|
||||
)
|
||||
})?;
|
||||
|
||||
let txt_record = response.iter().next().with_context(|| {
|
||||
format!(
|
||||
"no txt record found, domain_name: {}",
|
||||
domain_name.to_string()
|
||||
)
|
||||
})?;
|
||||
|
||||
let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
|
||||
tracing::info!(?txt_data, ?domain_name, "get txt record");
|
||||
|
||||
Ok(txt_data.to_string())
|
||||
}
|
||||
|
||||
struct HostResolverIter {
|
||||
hostnames: Vec<String>,
|
||||
ips: Vec<SocketAddr>,
|
||||
@@ -79,10 +41,7 @@ impl HostResolverIter {
|
||||
}
|
||||
|
||||
async fn get_txt_record(domain_name: &str) -> Result<Vec<String>, Error> {
|
||||
let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap_or(
|
||||
TokioAsyncResolver::tokio(get_default_resolver_config(), ResolverOpts::default()),
|
||||
);
|
||||
let txt_data = resolve_txt_record(domain_name, &resolver).await?;
|
||||
let txt_data = resolve_txt_record(domain_name).await?;
|
||||
Ok(txt_data.split(" ").map(|x| x.to_string()).collect())
|
||||
}
|
||||
|
||||
@@ -802,7 +761,10 @@ impl StunInfoCollector {
|
||||
async fn get_public_ipv6(servers: &Vec<String>) -> Option<Ipv6Addr> {
|
||||
let mut ips = HostResolverIter::new(servers.to_vec(), 10, true);
|
||||
while let Some(ip) = ips.next().await {
|
||||
let udp = Arc::new(UdpSocket::bind(format!("[::]:0")).await.unwrap());
|
||||
let Ok(udp_socket) = UdpSocket::bind(format!("[::]:0")).await else {
|
||||
break;
|
||||
};
|
||||
let udp = Arc::new(udp_socket);
|
||||
let ret = StunClientBuilder::new(udp.clone())
|
||||
.new_stun_client(ip)
|
||||
.bind_request(false, false)
|
||||
@@ -928,7 +890,7 @@ impl StunInfoCollectorTrait for MockStunInfoCollector {
|
||||
last_update_time: std::time::Instant::now().elapsed().as_secs() as i64,
|
||||
min_port: 100,
|
||||
max_port: 200,
|
||||
public_ip: vec!["127.0.0.1".to_string()],
|
||||
public_ip: vec!["127.0.0.1".to_string(), "::1".to_string()],
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,28 +12,31 @@ use std::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
|
||||
common::{error::Error, global_ctx::ArcGlobalCtx, stun::StunInfoCollectorTrait, PeerId},
|
||||
peers::{
|
||||
peer_manager::PeerManager, peer_rpc::PeerRpcManager,
|
||||
peer_conn::PeerConnId,
|
||||
peer_manager::PeerManager,
|
||||
peer_rpc::PeerRpcManager,
|
||||
peer_rpc_service::DirectConnectorManagerRpcServer,
|
||||
peer_task::{PeerTaskLauncher, PeerTaskManager},
|
||||
},
|
||||
proto::{
|
||||
peer_rpc::{
|
||||
DirectConnectorRpc, DirectConnectorRpcClientFactory, DirectConnectorRpcServer,
|
||||
GetIpListRequest, GetIpListResponse,
|
||||
GetIpListRequest, GetIpListResponse, SendV6HolePunchPacketRequest,
|
||||
},
|
||||
rpc_types::controller::BaseController,
|
||||
},
|
||||
tunnel::{udp::UdpTunnelConnector, IpVersion},
|
||||
};
|
||||
|
||||
use crate::proto::cli::PeerConnInfo;
|
||||
use anyhow::Context;
|
||||
use rand::Rng;
|
||||
use tokio::{task::JoinSet, time::timeout};
|
||||
use tracing::Instrument;
|
||||
use tokio::{net::UdpSocket, task::JoinSet, time::timeout};
|
||||
use url::Host;
|
||||
|
||||
use super::create_connector_by_url;
|
||||
use super::{create_connector_by_url, udp_hole_punch};
|
||||
|
||||
pub const DIRECT_CONNECTOR_SERVICE_ID: u32 = 1;
|
||||
pub const DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC: u64 = 300;
|
||||
@@ -76,7 +79,7 @@ impl PeerManagerForDirectConnector for PeerManager {
|
||||
struct DstBlackListItem(PeerId, String);
|
||||
|
||||
#[derive(Hash, Eq, PartialEq, Clone)]
|
||||
struct DstListenerUrlBlackListItem(PeerId, url::Url);
|
||||
struct DstListenerUrlBlackListItem(PeerId, String);
|
||||
|
||||
struct DirectConnectorManagerData {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
@@ -92,95 +95,114 @@ impl DirectConnectorManagerData {
|
||||
dst_listener_blacklist: timedmap::TimedMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for DirectConnectorManagerData {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("DirectConnectorManagerData")
|
||||
.field("peer_manager", &self.peer_manager)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
async fn remote_send_v6_hole_punch_packet(
|
||||
&self,
|
||||
dst_peer_id: PeerId,
|
||||
local_socket: &UdpSocket,
|
||||
remote_url: &url::Url,
|
||||
) -> Result<(), Error> {
|
||||
let global_ctx = self.peer_manager.get_global_ctx();
|
||||
let listener_port = remote_url.port().ok_or(anyhow::anyhow!(
|
||||
"failed to parse port from remote url: {}",
|
||||
remote_url
|
||||
))?;
|
||||
let connector_ip = global_ctx
|
||||
.get_stun_info_collector()
|
||||
.get_stun_info()
|
||||
.public_ip
|
||||
.iter()
|
||||
.find(|x| x.contains(":"))
|
||||
.ok_or(anyhow::anyhow!(
|
||||
"failed to get public ipv6 address from stun info"
|
||||
))?
|
||||
.parse::<std::net::Ipv6Addr>()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to parse public ipv6 address from stun info: {:?}",
|
||||
global_ctx.get_stun_info_collector().get_stun_info()
|
||||
)
|
||||
})?;
|
||||
let connector_addr = SocketAddr::new(
|
||||
std::net::IpAddr::V6(connector_ip),
|
||||
local_socket.local_addr()?.port(),
|
||||
);
|
||||
|
||||
pub struct DirectConnectorManager {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
data: Arc<DirectConnectorManagerData>,
|
||||
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
|
||||
impl DirectConnectorManager {
|
||||
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
|
||||
Self {
|
||||
global_ctx: global_ctx.clone(),
|
||||
data: Arc::new(DirectConnectorManagerData::new(global_ctx, peer_manager)),
|
||||
tasks: JoinSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run(&mut self) {
|
||||
if self.global_ctx.get_flags().disable_p2p {
|
||||
return;
|
||||
}
|
||||
|
||||
self.run_as_server();
|
||||
self.run_as_client();
|
||||
}
|
||||
|
||||
pub fn run_as_server(&mut self) {
|
||||
self.data
|
||||
let rpc_stub = self
|
||||
.peer_manager
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_server()
|
||||
.registry()
|
||||
.register(
|
||||
DirectConnectorRpcServer::new(DirectConnectorManagerRpcServer::new(
|
||||
self.global_ctx.clone(),
|
||||
)),
|
||||
&self.data.global_ctx.get_network_name(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn run_as_client(&mut self) {
|
||||
let data = self.data.clone();
|
||||
let my_peer_id = self.data.peer_manager.my_peer_id();
|
||||
self.tasks.spawn(
|
||||
async move {
|
||||
loop {
|
||||
let peers = data.peer_manager.list_peers().await;
|
||||
let mut tasks = JoinSet::new();
|
||||
for peer_id in peers {
|
||||
if peer_id == my_peer_id
|
||||
|| data.peer_manager.has_directly_connected_conn(peer_id)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
tasks.spawn(Self::do_try_direct_connect(data.clone(), peer_id));
|
||||
}
|
||||
|
||||
while let Some(task_ret) = tasks.join_next().await {
|
||||
tracing::debug!(?task_ret, ?my_peer_id, "direct connect task ret");
|
||||
}
|
||||
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
|
||||
}
|
||||
}
|
||||
.instrument(
|
||||
tracing::info_span!("direct_connector_client", my_id = ?self.global_ctx.id),
|
||||
),
|
||||
.rpc_client()
|
||||
.scoped_client::<DirectConnectorRpcClientFactory<BaseController>>(
|
||||
self.peer_manager.my_peer_id(),
|
||||
dst_peer_id,
|
||||
global_ctx.get_network_name(),
|
||||
);
|
||||
|
||||
rpc_stub
|
||||
.send_v6_hole_punch_packet(
|
||||
BaseController::default(),
|
||||
SendV6HolePunchPacketRequest {
|
||||
listener_port: listener_port as u32,
|
||||
connector_addr: Some(connector_addr.into()),
|
||||
},
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"do rpc, send v6 hole punch packet to peer {} at {}",
|
||||
dst_peer_id, remote_url
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn do_try_connect_to_ip(
|
||||
data: Arc<DirectConnectorManagerData>,
|
||||
async fn connect_to_public_ipv6(
|
||||
&self,
|
||||
dst_peer_id: PeerId,
|
||||
addr: String,
|
||||
) -> Result<(), Error> {
|
||||
let connector = create_connector_by_url(&addr, &data.global_ctx).await?;
|
||||
let (peer_id, conn_id) = timeout(
|
||||
std::time::Duration::from_secs(3),
|
||||
data.peer_manager.try_direct_connect(connector),
|
||||
remote_url: &url::Url,
|
||||
) -> Result<(PeerId, PeerConnId), Error> {
|
||||
let local_socket = Arc::new(
|
||||
UdpSocket::bind("[::]:0")
|
||||
.await
|
||||
.with_context(|| format!("failed to bind local socket for {}", remote_url))?,
|
||||
);
|
||||
|
||||
// ask remote to send v6 hole punch packet
|
||||
// and no matter what the result is, continue to connect
|
||||
let _ = self
|
||||
.remote_send_v6_hole_punch_packet(dst_peer_id, &local_socket, &remote_url)
|
||||
.await;
|
||||
|
||||
let udp_connector = UdpTunnelConnector::new(remote_url.clone());
|
||||
let remote_addr = super::check_scheme_and_get_socket_addr::<SocketAddr>(
|
||||
&remote_url,
|
||||
"udp",
|
||||
IpVersion::V6,
|
||||
)
|
||||
.await??;
|
||||
.await?;
|
||||
let ret = udp_connector
|
||||
.try_connect_with_socket(local_socket, remote_addr)
|
||||
.await?;
|
||||
|
||||
// NOTICE: must add as directly connected tunnel
|
||||
self.peer_manager.add_direct_tunnel(ret).await
|
||||
}
|
||||
|
||||
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
|
||||
let connector = create_connector_by_url(&addr, &self.global_ctx, IpVersion::Both).await?;
|
||||
let remote_url = connector.remote_url();
|
||||
let (peer_id, conn_id) =
|
||||
if remote_url.scheme() == "udp" && matches!(remote_url.host(), Some(Host::Ipv6(_))) {
|
||||
self.connect_to_public_ipv6(dst_peer_id, &remote_url)
|
||||
.await?
|
||||
} else {
|
||||
timeout(
|
||||
std::time::Duration::from_secs(3),
|
||||
self.peer_manager.try_direct_connect(connector),
|
||||
)
|
||||
.await??
|
||||
};
|
||||
|
||||
if peer_id != dst_peer_id && !TESTING.load(Ordering::Relaxed) {
|
||||
tracing::info!(
|
||||
@@ -189,7 +211,7 @@ impl DirectConnectorManager {
|
||||
dst_peer_id,
|
||||
peer_id
|
||||
);
|
||||
data.peer_manager
|
||||
self.peer_manager
|
||||
.get_peer_map()
|
||||
.close_peer_conn(peer_id, &conn_id)
|
||||
.await?;
|
||||
@@ -199,21 +221,44 @@ impl DirectConnectorManager {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn try_connect_to_ip(
|
||||
data: Arc<DirectConnectorManagerData>,
|
||||
self: Arc<DirectConnectorManagerData>,
|
||||
dst_peer_id: PeerId,
|
||||
addr: String,
|
||||
) -> Result<(), Error> {
|
||||
let mut rand_gen = rand::rngs::OsRng::default();
|
||||
let backoff_ms = vec![1000, 2000];
|
||||
let backoff_ms = vec![1000, 2000, 4000];
|
||||
let mut backoff_idx = 0;
|
||||
|
||||
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start");
|
||||
|
||||
self.dst_listener_blacklist.cleanup();
|
||||
|
||||
if self
|
||||
.dst_listener_blacklist
|
||||
.contains(&DstListenerUrlBlackListItem(
|
||||
dst_peer_id.clone(),
|
||||
addr.clone(),
|
||||
))
|
||||
{
|
||||
return Err(Error::UrlInBlacklist);
|
||||
}
|
||||
|
||||
loop {
|
||||
let ret = Self::do_try_connect_to_ip(data.clone(), dst_peer_id, addr.clone()).await;
|
||||
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
tracing::debug!(?dst_peer_id, ?addr, "try_connect_to_ip start one round");
|
||||
let ret = self.do_try_connect_to_ip(dst_peer_id, addr.clone()).await;
|
||||
tracing::debug!(?ret, ?dst_peer_id, ?addr, "try_connect_to_ip return");
|
||||
if matches!(ret, Err(Error::UrlInBlacklist) | Ok(_)) {
|
||||
return ret;
|
||||
if ret.is_ok() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if backoff_idx < backoff_ms.len() {
|
||||
@@ -229,49 +274,29 @@ impl DirectConnectorManager {
|
||||
backoff_idx += 1;
|
||||
continue;
|
||||
} else {
|
||||
self.dst_listener_blacklist.insert(
|
||||
DstListenerUrlBlackListItem(dst_peer_id.clone(), addr),
|
||||
(),
|
||||
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
|
||||
);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
async fn do_try_direct_connect_internal(
|
||||
data: Arc<DirectConnectorManagerData>,
|
||||
fn spawn_direct_connect_task(
|
||||
self: &Arc<DirectConnectorManagerData>,
|
||||
dst_peer_id: PeerId,
|
||||
ip_list: GetIpListResponse,
|
||||
) -> Result<(), Error> {
|
||||
data.dst_listener_blacklist.cleanup();
|
||||
|
||||
let enable_ipv6 = data.global_ctx.get_flags().enable_ipv6;
|
||||
let available_listeners = ip_list
|
||||
.listeners
|
||||
.into_iter()
|
||||
.map(Into::<url::Url>::into)
|
||||
.filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None })
|
||||
.filter(|l| l.port().is_some() && l.host().is_some())
|
||||
.filter(|l| {
|
||||
!data
|
||||
.dst_listener_blacklist
|
||||
.contains(&DstListenerUrlBlackListItem(dst_peer_id.clone(), l.clone()))
|
||||
})
|
||||
.filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_)))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
tracing::debug!(?available_listeners, "got available listeners");
|
||||
|
||||
if available_listeners.is_empty() {
|
||||
return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into());
|
||||
}
|
||||
|
||||
// if have default listener, use it first
|
||||
let listener = available_listeners
|
||||
.iter()
|
||||
.find(|l| l.scheme() == data.global_ctx.get_flags().default_protocol)
|
||||
.unwrap_or(available_listeners.get(0).unwrap());
|
||||
|
||||
let mut tasks = bounded_join_set::JoinSet::new(2);
|
||||
|
||||
let listener_host = listener.socket_addrs(|| None).unwrap().pop();
|
||||
ip_list: &GetIpListResponse,
|
||||
listener: &url::Url,
|
||||
tasks: &mut JoinSet<Result<(), Error>>,
|
||||
) {
|
||||
let Ok(mut addrs) = listener.socket_addrs(|| None) else {
|
||||
tracing::error!(?listener, "failed to parse socket address from listener");
|
||||
return;
|
||||
};
|
||||
let listener_host = addrs.pop();
|
||||
tracing::info!(?listener_host, ?listener, "try direct connect to peer");
|
||||
match listener_host {
|
||||
Some(SocketAddr::V4(s_addr)) => {
|
||||
if s_addr.ip().is_unspecified() {
|
||||
@@ -283,7 +308,7 @@ impl DirectConnectorManager {
|
||||
let mut addr = (*listener).clone();
|
||||
if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
|
||||
tasks.spawn(Self::try_connect_to_ip(
|
||||
data.clone(),
|
||||
self.clone(),
|
||||
dst_peer_id.clone(),
|
||||
addr.to_string(),
|
||||
));
|
||||
@@ -298,7 +323,7 @@ impl DirectConnectorManager {
|
||||
});
|
||||
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
|
||||
tasks.spawn(Self::try_connect_to_ip(
|
||||
data.clone(),
|
||||
self.clone(),
|
||||
dst_peer_id.clone(),
|
||||
listener.to_string(),
|
||||
));
|
||||
@@ -329,7 +354,7 @@ impl DirectConnectorManager {
|
||||
.is_ok()
|
||||
{
|
||||
tasks.spawn(Self::try_connect_to_ip(
|
||||
data.clone(),
|
||||
self.clone(),
|
||||
dst_peer_id.clone(),
|
||||
addr.to_string(),
|
||||
));
|
||||
@@ -344,7 +369,7 @@ impl DirectConnectorManager {
|
||||
});
|
||||
} else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
|
||||
tasks.spawn(Self::try_connect_to_ip(
|
||||
data.clone(),
|
||||
self.clone(),
|
||||
dst_peer_id.clone(),
|
||||
listener.to_string(),
|
||||
));
|
||||
@@ -354,64 +379,230 @@ impl DirectConnectorManager {
|
||||
tracing::error!(?p, ?listener, "failed to parse ip version from listener");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut has_succ = false;
|
||||
while let Some(ret) = tasks.join_next().await {
|
||||
match ret {
|
||||
Ok(Ok(_)) => {
|
||||
has_succ = true;
|
||||
tracing::info!(
|
||||
?dst_peer_id,
|
||||
?listener,
|
||||
"try direct connect to peer success"
|
||||
);
|
||||
break;
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
tracing::info!(?e, "try direct connect to peer failed");
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(?e, "try direct connect to peer task join failed");
|
||||
}
|
||||
}
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn do_try_direct_connect_internal(
|
||||
self: &Arc<DirectConnectorManagerData>,
|
||||
dst_peer_id: PeerId,
|
||||
ip_list: GetIpListResponse,
|
||||
) -> Result<(), Error> {
|
||||
let enable_ipv6 = self.global_ctx.get_flags().enable_ipv6;
|
||||
let available_listeners = ip_list
|
||||
.listeners
|
||||
.clone()
|
||||
.into_iter()
|
||||
.map(Into::<url::Url>::into)
|
||||
.filter_map(|l| if l.scheme() != "ring" { Some(l) } else { None })
|
||||
.filter(|l| l.port().is_some() && l.host().is_some())
|
||||
.filter(|l| enable_ipv6 || !matches!(l.host().unwrap().to_owned(), Host::Ipv6(_)))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
tracing::debug!(?available_listeners, "got available listeners");
|
||||
|
||||
if available_listeners.is_empty() {
|
||||
return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into());
|
||||
}
|
||||
|
||||
if !has_succ {
|
||||
data.dst_listener_blacklist.insert(
|
||||
DstListenerUrlBlackListItem(dst_peer_id.clone(), listener.clone()),
|
||||
(),
|
||||
std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
|
||||
let default_protocol = self.global_ctx.get_flags().default_protocol;
|
||||
// sort available listeners, default protocol has the highest priority, udp is second, others just random
|
||||
// highest priority is in the last
|
||||
let mut available_listeners = available_listeners;
|
||||
available_listeners.sort_by_key(|l| {
|
||||
let scheme = l.scheme();
|
||||
if scheme == default_protocol {
|
||||
3
|
||||
} else if scheme == "udp" {
|
||||
2
|
||||
} else {
|
||||
1
|
||||
}
|
||||
});
|
||||
|
||||
while !available_listeners.is_empty() {
|
||||
let mut tasks = JoinSet::new();
|
||||
let mut listener_list = vec![];
|
||||
|
||||
let cur_scheme = available_listeners.last().unwrap().scheme().to_owned();
|
||||
while let Some(listener) = available_listeners.last() {
|
||||
if listener.scheme() != cur_scheme {
|
||||
break;
|
||||
}
|
||||
|
||||
tracing::debug!("try direct connect to peer with listener: {}", listener);
|
||||
self.spawn_direct_connect_task(
|
||||
dst_peer_id.clone(),
|
||||
&ip_list,
|
||||
&listener,
|
||||
&mut tasks,
|
||||
);
|
||||
|
||||
listener_list.push(listener.clone().to_string());
|
||||
available_listeners.pop();
|
||||
}
|
||||
|
||||
let ret = tasks.join_all().await;
|
||||
tracing::debug!(
|
||||
?ret,
|
||||
?dst_peer_id,
|
||||
?cur_scheme,
|
||||
?listener_list,
|
||||
"all tasks finished for current scheme"
|
||||
);
|
||||
|
||||
if self.peer_manager.has_directly_connected_conn(dst_peer_id) {
|
||||
tracing::info!(
|
||||
"direct connect to peer {} success, has direct conn",
|
||||
dst_peer_id
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn do_try_direct_connect(
|
||||
data: Arc<DirectConnectorManagerData>,
|
||||
self: Arc<DirectConnectorManagerData>,
|
||||
dst_peer_id: PeerId,
|
||||
) -> Result<(), Error> {
|
||||
let peer_manager = data.peer_manager.clone();
|
||||
tracing::debug!("try direct connect to peer: {}", dst_peer_id);
|
||||
let mut backoff =
|
||||
udp_hole_punch::BackOff::new(vec![1000, 2000, 2000, 5000, 5000, 10000, 30000, 60000]);
|
||||
loop {
|
||||
let peer_manager = self.peer_manager.clone();
|
||||
tracing::debug!("try direct connect to peer: {}", dst_peer_id);
|
||||
|
||||
let rpc_stub = peer_manager
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_client()
|
||||
.scoped_client::<DirectConnectorRpcClientFactory<BaseController>>(
|
||||
let rpc_stub = peer_manager
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_client()
|
||||
.scoped_client::<DirectConnectorRpcClientFactory<BaseController>>(
|
||||
peer_manager.my_peer_id(),
|
||||
dst_peer_id,
|
||||
data.global_ctx.get_network_name(),
|
||||
self.global_ctx.get_network_name(),
|
||||
);
|
||||
|
||||
let ip_list = rpc_stub
|
||||
.get_ip_list(BaseController::default(), GetIpListRequest {})
|
||||
let ip_list = rpc_stub
|
||||
.get_ip_list(BaseController::default(), GetIpListRequest {})
|
||||
.await
|
||||
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?;
|
||||
|
||||
tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list");
|
||||
|
||||
let ret = self
|
||||
.do_try_direct_connect_internal(dst_peer_id, ip_list)
|
||||
.await;
|
||||
tracing::info!(?ret, ?dst_peer_id, "do_try_direct_connect return");
|
||||
|
||||
if peer_manager.has_directly_connected_conn(dst_peer_id) {
|
||||
tracing::info!(
|
||||
"direct connect to peer {} success, has direct conn",
|
||||
dst_peer_id
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(backoff.next_backoff())).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for DirectConnectorManagerData {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("DirectConnectorManagerData")
|
||||
.field("peer_manager", &self.peer_manager)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DirectConnectorManager {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
data: Arc<DirectConnectorManagerData>,
|
||||
client: PeerTaskManager<DirectConnectorLauncher>,
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct DirectConnectorLauncher(Arc<DirectConnectorManagerData>);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl PeerTaskLauncher for DirectConnectorLauncher {
|
||||
type Data = Arc<DirectConnectorManagerData>;
|
||||
type CollectPeerItem = PeerId;
|
||||
type TaskRet = ();
|
||||
|
||||
fn new_data(&self, _peer_mgr: Arc<PeerManager>) -> Self::Data {
|
||||
self.0.clone()
|
||||
}
|
||||
|
||||
async fn collect_peers_need_task(&self, data: &Self::Data) -> Vec<Self::CollectPeerItem> {
|
||||
let my_peer_id = data.peer_manager.my_peer_id();
|
||||
data.peer_manager
|
||||
.list_peers()
|
||||
.await
|
||||
.with_context(|| format!("get ip list from peer {}", dst_peer_id))?;
|
||||
.into_iter()
|
||||
.filter(|peer_id| {
|
||||
*peer_id != my_peer_id && !data.peer_manager.has_directly_connected_conn(*peer_id)
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
tracing::info!(ip_list = ?ip_list, dst_peer_id = ?dst_peer_id, "got ip list");
|
||||
async fn launch_task(
|
||||
&self,
|
||||
data: &Self::Data,
|
||||
item: Self::CollectPeerItem,
|
||||
) -> tokio::task::JoinHandle<Result<Self::TaskRet, anyhow::Error>> {
|
||||
let data = data.clone();
|
||||
tokio::spawn(async move { data.do_try_direct_connect(item).await.map_err(Into::into) })
|
||||
}
|
||||
|
||||
Self::do_try_direct_connect_internal(data, dst_peer_id, ip_list).await
|
||||
async fn all_task_done(&self, _data: &Self::Data) {}
|
||||
|
||||
fn loop_interval_ms(&self) -> u64 {
|
||||
5000
|
||||
}
|
||||
}
|
||||
|
||||
impl DirectConnectorManager {
|
||||
pub fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Self {
|
||||
let data = Arc::new(DirectConnectorManagerData::new(
|
||||
global_ctx.clone(),
|
||||
peer_manager.clone(),
|
||||
));
|
||||
let client = PeerTaskManager::new(DirectConnectorLauncher(data.clone()), peer_manager);
|
||||
Self {
|
||||
global_ctx,
|
||||
data,
|
||||
client,
|
||||
tasks: JoinSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn run(&mut self) {
|
||||
if self.global_ctx.get_flags().disable_p2p {
|
||||
return;
|
||||
}
|
||||
|
||||
self.run_as_server();
|
||||
self.run_as_client();
|
||||
}
|
||||
|
||||
pub fn run_as_server(&mut self) {
|
||||
self.data
|
||||
.peer_manager
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_server()
|
||||
.registry()
|
||||
.register(
|
||||
DirectConnectorRpcServer::new(DirectConnectorManagerRpcServer::new(
|
||||
self.global_ctx.clone(),
|
||||
)),
|
||||
&self.data.global_ctx.get_network_name(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn run_as_client(&mut self) {
|
||||
self.client.start();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -490,6 +681,13 @@ mod tests {
|
||||
|
||||
wait_route_appear(p_a.clone(), p_c.clone()).await.unwrap();
|
||||
|
||||
p_c.get_global_ctx()
|
||||
.get_ip_collector()
|
||||
.collect_ip_addrs()
|
||||
.await;
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(4)).await;
|
||||
|
||||
let mut dm_a = DirectConnectorManager::new(p_a.get_global_ctx(), p_a.clone());
|
||||
let mut dm_c = DirectConnectorManager::new(p_c.get_global_ctx(), p_c.clone());
|
||||
|
||||
@@ -524,6 +722,7 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn direct_connector_scheme_blacklist() {
|
||||
TESTING.store(true, std::sync::atomic::Ordering::Relaxed);
|
||||
let p_a = create_mock_peer_manager().await;
|
||||
let data = Arc::new(DirectConnectorManagerData::new(
|
||||
p_a.get_global_ctx(),
|
||||
@@ -538,7 +737,7 @@ mod tests {
|
||||
.interface_ipv4s
|
||||
.push("127.0.0.1".parse::<std::net::Ipv4Addr>().unwrap().into());
|
||||
|
||||
DirectConnectorManager::do_try_direct_connect_internal(data.clone(), 1, ip_list.clone())
|
||||
data.do_try_direct_connect_internal(1, ip_list.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -2,19 +2,15 @@ use std::{net::SocketAddr, sync::Arc};
|
||||
|
||||
use crate::{
|
||||
common::{
|
||||
dns::{resolve_txt_record, RESOLVER},
|
||||
error::Error,
|
||||
global_ctx::ArcGlobalCtx,
|
||||
stun::{get_default_resolver_config, resolve_txt_record},
|
||||
},
|
||||
tunnel::{IpVersion, Tunnel, TunnelConnector, TunnelError, PROTO_PORT_OFFSET},
|
||||
};
|
||||
use anyhow::Context;
|
||||
use dashmap::DashSet;
|
||||
use hickory_resolver::{
|
||||
config::{ResolverConfig, ResolverOpts},
|
||||
proto::rr::rdata::SRV,
|
||||
TokioAsyncResolver,
|
||||
};
|
||||
use hickory_resolver::proto::rr::rdata::SRV;
|
||||
use rand::{seq::SliceRandom, Rng as _};
|
||||
|
||||
use crate::proto::common::TunnelInfo;
|
||||
@@ -43,9 +39,6 @@ pub struct DNSTunnelConnector {
|
||||
bind_addrs: Vec<SocketAddr>,
|
||||
global_ctx: ArcGlobalCtx,
|
||||
ip_version: IpVersion,
|
||||
|
||||
default_resolve_config: ResolverConfig,
|
||||
default_resolve_opts: ResolverOpts,
|
||||
}
|
||||
|
||||
impl DNSTunnelConnector {
|
||||
@@ -55,9 +48,6 @@ impl DNSTunnelConnector {
|
||||
bind_addrs: Vec::new(),
|
||||
global_ctx,
|
||||
ip_version: IpVersion::Both,
|
||||
|
||||
default_resolve_config: get_default_resolver_config(),
|
||||
default_resolve_opts: ResolverOpts::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,12 +56,7 @@ impl DNSTunnelConnector {
|
||||
&self,
|
||||
domain_name: &str,
|
||||
) -> Result<Box<dyn TunnelConnector>, Error> {
|
||||
let resolver =
|
||||
TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio(
|
||||
self.default_resolve_config.clone(),
|
||||
self.default_resolve_opts.clone(),
|
||||
));
|
||||
let txt_data = resolve_txt_record(domain_name, &resolver)
|
||||
let txt_data = resolve_txt_record(domain_name)
|
||||
.await
|
||||
.with_context(|| format!("resolve txt record failed, domain_name: {}", domain_name))?;
|
||||
|
||||
@@ -91,8 +76,8 @@ impl DNSTunnelConnector {
|
||||
)
|
||||
})?;
|
||||
|
||||
let mut connector = create_connector_by_url(url.as_str(), &self.global_ctx).await?;
|
||||
connector.set_ip_version(self.ip_version);
|
||||
let connector =
|
||||
create_connector_by_url(url.as_str(), &self.global_ctx, self.ip_version).await?;
|
||||
Ok(connector)
|
||||
}
|
||||
|
||||
@@ -126,12 +111,6 @@ impl DNSTunnelConnector {
|
||||
) -> Result<Box<dyn TunnelConnector>, Error> {
|
||||
tracing::info!("handle_srv_record: {}", domain_name);
|
||||
|
||||
let resolver =
|
||||
TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio(
|
||||
self.default_resolve_config.clone(),
|
||||
self.default_resolve_opts.clone(),
|
||||
));
|
||||
|
||||
let srv_domains = PROTO_PORT_OFFSET
|
||||
.iter()
|
||||
.map(|(p, _)| (format!("_easytier._{}.{}", p, domain_name), *p)) // _easytier._udp.{domain_name}
|
||||
@@ -141,7 +120,7 @@ impl DNSTunnelConnector {
|
||||
let srv_lookup_tasks = srv_domains
|
||||
.iter()
|
||||
.map(|(srv_domain, protocol)| {
|
||||
let resolver = resolver.clone();
|
||||
let resolver = RESOLVER.clone();
|
||||
let responses = responses.clone();
|
||||
async move {
|
||||
let response = resolver.srv_lookup(srv_domain).await.with_context(|| {
|
||||
@@ -179,8 +158,8 @@ impl DNSTunnelConnector {
|
||||
)
|
||||
})?;
|
||||
|
||||
let mut connector = create_connector_by_url(url.as_str(), &self.global_ctx).await?;
|
||||
connector.set_ip_version(self.ip_version);
|
||||
let connector =
|
||||
create_connector_by_url(url.as_str(), &self.global_ctx, self.ip_version).await?;
|
||||
Ok(connector)
|
||||
}
|
||||
}
|
||||
@@ -242,8 +221,18 @@ mod tests {
|
||||
let url = "txt://txt.easytier.cn";
|
||||
let global_ctx = get_mock_global_ctx();
|
||||
let mut connector = DNSTunnelConnector::new(url.parse().unwrap(), global_ctx);
|
||||
let ret = connector.connect().await.unwrap();
|
||||
println!("{:?}", ret.info());
|
||||
connector.set_ip_version(IpVersion::V4);
|
||||
for _ in 0..5 {
|
||||
match connector.connect().await {
|
||||
Ok(ret) => {
|
||||
println!("{:?}", ret.info());
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
println!("{:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -251,7 +240,17 @@ mod tests {
|
||||
let url = "srv://easytier.cn";
|
||||
let global_ctx = get_mock_global_ctx();
|
||||
let mut connector = DNSTunnelConnector::new(url.parse().unwrap(), global_ctx);
|
||||
let ret = connector.connect().await.unwrap();
|
||||
println!("{:?}", ret.info());
|
||||
connector.set_ip_version(IpVersion::V4);
|
||||
for _ in 0..5 {
|
||||
match connector.connect().await {
|
||||
Ok(ret) => {
|
||||
println!("{:?}", ret.info());
|
||||
return;
|
||||
}
|
||||
Err(e) => {
|
||||
println!("{:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,14 +92,24 @@ impl HttpTunnelConnector {
|
||||
if !query.is_empty() {
|
||||
tracing::info!("try to create connector by url: {}", query[0]);
|
||||
self.redirect_type = HttpRedirectType::RedirectToQuery;
|
||||
return create_connector_by_url(&query[0].to_string(), &self.global_ctx).await;
|
||||
return create_connector_by_url(
|
||||
&query[0].to_string(),
|
||||
&self.global_ctx,
|
||||
self.ip_version,
|
||||
)
|
||||
.await;
|
||||
} else if let Some(new_url) = url_str
|
||||
.strip_prefix(format!("{}://", url.scheme()).as_str())
|
||||
.and_then(|x| Url::parse(x).ok())
|
||||
{
|
||||
// stripe the scheme and create connector by url
|
||||
self.redirect_type = HttpRedirectType::RedirectToUrl;
|
||||
return create_connector_by_url(new_url.as_str(), &self.global_ctx).await;
|
||||
return create_connector_by_url(
|
||||
new_url.as_str(),
|
||||
&self.global_ctx,
|
||||
self.ip_version,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
return Err(Error::InvalidUrl(format!(
|
||||
"no valid connector url found in url: {}",
|
||||
@@ -107,7 +117,8 @@ impl HttpTunnelConnector {
|
||||
)));
|
||||
} else {
|
||||
self.redirect_type = HttpRedirectType::RedirectToUrl;
|
||||
return create_connector_by_url(new_url.as_str(), &self.global_ctx).await;
|
||||
return create_connector_by_url(new_url.as_str(), &self.global_ctx, self.ip_version)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -137,7 +148,7 @@ impl HttpTunnelConnector {
|
||||
continue;
|
||||
}
|
||||
self.redirect_type = HttpRedirectType::BodyUrls;
|
||||
return create_connector_by_url(line, &self.global_ctx).await;
|
||||
return create_connector_by_url(line, &self.global_ctx, self.ip_version).await;
|
||||
}
|
||||
|
||||
Err(Error::InvalidUrl(format!(
|
||||
|
||||
@@ -3,7 +3,10 @@ use std::{collections::BTreeSet, sync::Arc};
|
||||
use anyhow::Context;
|
||||
use dashmap::{DashMap, DashSet};
|
||||
use tokio::{
|
||||
sync::{broadcast::Receiver, mpsc, Mutex},
|
||||
sync::{
|
||||
broadcast::{error::RecvError, Receiver},
|
||||
mpsc, Mutex,
|
||||
},
|
||||
task::JoinSet,
|
||||
time::timeout,
|
||||
};
|
||||
@@ -106,7 +109,7 @@ impl ManualConnectorManager {
|
||||
}
|
||||
|
||||
pub async fn add_connector_by_url(&self, url: &str) -> Result<(), Error> {
|
||||
self.add_connector(create_connector_by_url(url, &self.global_ctx).await?);
|
||||
self.add_connector(create_connector_by_url(url, &self.global_ctx, IpVersion::Both).await?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -179,8 +182,37 @@ impl ManualConnectorManager {
|
||||
mut event_recv: Receiver<GlobalCtxEvent>,
|
||||
) {
|
||||
loop {
|
||||
let event = event_recv.recv().await.expect("event_recv got error");
|
||||
Self::handle_event(&event, &data).await;
|
||||
match event_recv.recv().await {
|
||||
Ok(event) => {
|
||||
Self::handle_event(&event, &data).await;
|
||||
}
|
||||
Err(RecvError::Lagged(n)) => {
|
||||
tracing::warn!("event_recv lagged: {}, rebuild alive conn list", n);
|
||||
event_recv = event_recv.resubscribe();
|
||||
data.alive_conn_urls.clear();
|
||||
for x in data
|
||||
.peer_manager
|
||||
.get_peer_map()
|
||||
.get_alive_conns()
|
||||
.iter()
|
||||
.map(|x| {
|
||||
x.tunnel
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.remote_addr
|
||||
.unwrap_or_default()
|
||||
.to_string()
|
||||
})
|
||||
{
|
||||
data.alive_conn_urls.insert(x);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
Err(RecvError::Closed) => {
|
||||
tracing::warn!("event_recv closed, exit");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -271,7 +303,6 @@ impl ManualConnectorManager {
|
||||
|
||||
async fn collect_dead_conns(data: Arc<ConnectorManagerData>) -> BTreeSet<String> {
|
||||
Self::handle_remove_connector(data.clone());
|
||||
|
||||
let all_urls: BTreeSet<String> = data
|
||||
.connectors
|
||||
.iter()
|
||||
|
||||
@@ -13,7 +13,7 @@ use crate::{
|
||||
common::{error::Error, global_ctx::ArcGlobalCtx, network::IPCollector},
|
||||
tunnel::{
|
||||
check_scheme_and_get_socket_addr, ring::RingTunnelConnector, tcp::TcpTunnelConnector,
|
||||
udp::UdpTunnelConnector, TunnelConnector,
|
||||
udp::UdpTunnelConnector, IpVersion, TunnelConnector,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -43,8 +43,8 @@ async fn set_bind_addr_for_peer_connector(
|
||||
connector.set_bind_addrs(bind_addrs);
|
||||
} else {
|
||||
let mut bind_addrs = vec![];
|
||||
for ipv6 in ips.interface_ipv6s {
|
||||
let socket_addr = SocketAddrV6::new(ipv6.into(), 0, 0, 0).into();
|
||||
for ipv6 in ips.interface_ipv6s.iter().chain(ips.public_ipv6.iter()) {
|
||||
let socket_addr = SocketAddrV6::new(std::net::Ipv6Addr::from(*ipv6), 0, 0, 0).into();
|
||||
bind_addrs.push(socket_addr);
|
||||
}
|
||||
connector.set_bind_addrs(bind_addrs);
|
||||
@@ -55,11 +55,13 @@ async fn set_bind_addr_for_peer_connector(
|
||||
pub async fn create_connector_by_url(
|
||||
url: &str,
|
||||
global_ctx: &ArcGlobalCtx,
|
||||
ip_version: IpVersion,
|
||||
) -> Result<Box<dyn TunnelConnector + 'static>, Error> {
|
||||
let url = url::Url::parse(url).map_err(|_| Error::InvalidUrl(url.to_owned()))?;
|
||||
match url.scheme() {
|
||||
let mut connector: Box<dyn TunnelConnector + 'static> = match url.scheme() {
|
||||
"tcp" => {
|
||||
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp")?;
|
||||
let dst_addr =
|
||||
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "tcp", ip_version).await?;
|
||||
let mut connector = TcpTunnelConnector::new(url);
|
||||
if global_ctx.config.get_flags().bind_device {
|
||||
set_bind_addr_for_peer_connector(
|
||||
@@ -69,10 +71,11 @@ pub async fn create_connector_by_url(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
"udp" => {
|
||||
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp")?;
|
||||
let dst_addr =
|
||||
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "udp", ip_version).await?;
|
||||
let mut connector = UdpTunnelConnector::new(url);
|
||||
if global_ctx.config.get_flags().bind_device {
|
||||
set_bind_addr_for_peer_connector(
|
||||
@@ -82,20 +85,21 @@ pub async fn create_connector_by_url(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
"http" | "https" => {
|
||||
let connector = HttpTunnelConnector::new(url, global_ctx.clone());
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
"ring" => {
|
||||
check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring")?;
|
||||
check_scheme_and_get_socket_addr::<uuid::Uuid>(&url, "ring", IpVersion::Both).await?;
|
||||
let connector = RingTunnelConnector::new(url);
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
#[cfg(feature = "quic")]
|
||||
"quic" => {
|
||||
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic")?;
|
||||
let dst_addr =
|
||||
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic", ip_version).await?;
|
||||
let mut connector = QUICTunnelConnector::new(url);
|
||||
if global_ctx.config.get_flags().bind_device {
|
||||
set_bind_addr_for_peer_connector(
|
||||
@@ -105,11 +109,12 @@ pub async fn create_connector_by_url(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
#[cfg(feature = "wireguard")]
|
||||
"wg" => {
|
||||
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg")?;
|
||||
let dst_addr =
|
||||
check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg", ip_version).await?;
|
||||
let nid = global_ctx.get_network_identity();
|
||||
let wg_config = WgConfig::new_from_network_identity(
|
||||
&nid.network_name,
|
||||
@@ -124,12 +129,12 @@ pub async fn create_connector_by_url(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
#[cfg(feature = "websocket")]
|
||||
"ws" | "wss" => {
|
||||
use crate::tunnel::{FromUrl, IpVersion};
|
||||
let dst_addr = SocketAddr::from_url(url.clone(), IpVersion::Both)?;
|
||||
use crate::tunnel::FromUrl;
|
||||
let dst_addr = SocketAddr::from_url(url.clone(), ip_version).await?;
|
||||
let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url);
|
||||
if global_ctx.config.get_flags().bind_device {
|
||||
set_bind_addr_for_peer_connector(
|
||||
@@ -139,14 +144,17 @@ pub async fn create_connector_by_url(
|
||||
)
|
||||
.await;
|
||||
}
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
"txt" | "srv" => {
|
||||
let connector = dns_connector::DNSTunnelConnector::new(url, global_ctx.clone());
|
||||
return Ok(Box::new(connector));
|
||||
Box::new(connector)
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::InvalidUrl(url.into()));
|
||||
}
|
||||
}
|
||||
};
|
||||
connector.set_ip_version(ip_version);
|
||||
|
||||
Ok(connector)
|
||||
}
|
||||
|
||||
@@ -495,6 +495,7 @@ impl PunchHoleServerCommon {
|
||||
.udp_nat_type
|
||||
}
|
||||
|
||||
#[async_recursion::async_recursion]
|
||||
pub(crate) async fn select_listener(
|
||||
&self,
|
||||
use_new_listener: bool,
|
||||
@@ -515,24 +516,28 @@ impl PunchHoleServerCommon {
|
||||
let mut locked = all_listener_sockets.lock().await;
|
||||
|
||||
let listener = if use_last {
|
||||
locked.last_mut()?
|
||||
Some(locked.last_mut()?)
|
||||
} else {
|
||||
// use the listener that is active most recently
|
||||
locked
|
||||
.iter_mut()
|
||||
.max_by_key(|listener| listener.last_active_time.load())?
|
||||
.filter(|l| !l.mapped_addr.ip().is_unspecified())
|
||||
.max_by_key(|listener| listener.last_active_time.load())
|
||||
};
|
||||
|
||||
if listener.mapped_addr.ip().is_unspecified() {
|
||||
tracing::info!("listener mapped addr is unspecified, trying to get mapped addr");
|
||||
listener.mapped_addr = self
|
||||
.get_global_ctx()
|
||||
.get_stun_info_collector()
|
||||
.get_udp_port_mapping(listener.mapped_addr.port())
|
||||
.await
|
||||
.ok()?;
|
||||
if listener.is_none() || listener.as_ref().unwrap().mapped_addr.ip().is_unspecified() {
|
||||
tracing::warn!(
|
||||
?use_new_listener,
|
||||
"no available udp hole punching listener with mapped address"
|
||||
);
|
||||
if !use_new_listener {
|
||||
return self.select_listener(true).await;
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
|
||||
let listener = listener.unwrap();
|
||||
Some((listener.get_socket().await, listener.mapped_addr))
|
||||
}
|
||||
|
||||
|
||||
@@ -143,7 +143,7 @@ impl UdpHolePunchRpc for UdpHolePunchServer {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct BackOff {
|
||||
pub struct BackOff {
|
||||
backoffs_ms: Vec<u64>,
|
||||
current_idx: usize,
|
||||
}
|
||||
|
||||
@@ -434,7 +434,7 @@ impl PunchSymToConeHoleClient {
|
||||
let public_ips: Vec<Ipv4Addr> = stun_info
|
||||
.public_ip
|
||||
.iter()
|
||||
.map(|x| x.parse().unwrap())
|
||||
.filter_map(|x| x.parse().ok())
|
||||
.collect();
|
||||
if public_ips.is_empty() {
|
||||
return Err(anyhow::anyhow!("failed to get public ips"));
|
||||
|
||||
@@ -3,12 +3,14 @@ use std::{
|
||||
fmt::Write,
|
||||
net::{IpAddr, SocketAddr},
|
||||
path::PathBuf,
|
||||
str::FromStr,
|
||||
sync::Mutex,
|
||||
time::Duration,
|
||||
vec,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use cidr::Ipv4Inet;
|
||||
use clap::{command, Args, Parser, Subcommand};
|
||||
use humansize::format_size;
|
||||
use service_manager::*;
|
||||
@@ -51,6 +53,15 @@ struct Cli {
|
||||
#[arg(short, long, default_value = "false", help = "verbose output")]
|
||||
verbose: bool,
|
||||
|
||||
#[arg(
|
||||
short = 'o',
|
||||
long = "output",
|
||||
value_enum,
|
||||
default_value = "table",
|
||||
help = "output format"
|
||||
)]
|
||||
output_format: OutputFormat,
|
||||
|
||||
#[command(subcommand)]
|
||||
sub_command: SubCommand,
|
||||
}
|
||||
@@ -77,23 +88,23 @@ enum SubCommand {
|
||||
Proxy,
|
||||
}
|
||||
|
||||
#[derive(clap::ValueEnum, Debug, Clone, PartialEq)]
|
||||
enum OutputFormat {
|
||||
Table,
|
||||
Json,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct PeerArgs {
|
||||
#[command(subcommand)]
|
||||
sub_command: Option<PeerSubCommand>,
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
struct PeerListArgs {
|
||||
#[arg(short, long)]
|
||||
verbose: bool,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
enum PeerSubCommand {
|
||||
Add,
|
||||
Remove,
|
||||
List(PeerListArgs),
|
||||
List,
|
||||
ListForeign,
|
||||
ListGlobalForeign,
|
||||
}
|
||||
@@ -193,14 +204,15 @@ struct InstallArgs {
|
||||
|
||||
type Error = anyhow::Error;
|
||||
|
||||
struct CommandHandler {
|
||||
struct CommandHandler<'a> {
|
||||
client: Mutex<RpcClient>,
|
||||
verbose: bool,
|
||||
output_format: &'a OutputFormat,
|
||||
}
|
||||
|
||||
type RpcClient = StandAloneClient<TcpTunnelConnector>;
|
||||
|
||||
impl CommandHandler {
|
||||
impl CommandHandler<'_> {
|
||||
async fn get_peer_manager_client(
|
||||
&self,
|
||||
) -> Result<Box<dyn PeerManageRpc<Controller = BaseController>>, Error> {
|
||||
@@ -294,9 +306,12 @@ impl CommandHandler {
|
||||
println!("remove peer");
|
||||
}
|
||||
|
||||
async fn handle_peer_list(&self, _args: &PeerArgs) -> Result<(), Error> {
|
||||
#[derive(tabled::Tabled)]
|
||||
async fn handle_peer_list(&self) -> Result<(), Error> {
|
||||
#[derive(tabled::Tabled, serde::Serialize)]
|
||||
struct PeerTableItem {
|
||||
#[tabled(rename = "ipv4")]
|
||||
cidr: String,
|
||||
#[tabled(skip)]
|
||||
ipv4: String,
|
||||
hostname: String,
|
||||
cost: String,
|
||||
@@ -314,7 +329,12 @@ impl CommandHandler {
|
||||
fn from(p: PeerRoutePair) -> Self {
|
||||
let route = p.route.clone().unwrap_or_default();
|
||||
PeerTableItem {
|
||||
ipv4: route.ipv4_addr.map(|ip| ip.to_string()).unwrap_or_default(),
|
||||
cidr: route.ipv4_addr.map(|ip| ip.to_string()).unwrap_or_default(),
|
||||
ipv4: route
|
||||
.ipv4_addr
|
||||
.map(|ip: easytier::proto::common::Ipv4Inet| ip.address.unwrap_or_default())
|
||||
.map(|ip| ip.to_string())
|
||||
.unwrap_or_default(),
|
||||
hostname: route.hostname.clone(),
|
||||
cost: cost_to_str(route.cost),
|
||||
lat_ms: if route.cost == 1 {
|
||||
@@ -344,7 +364,10 @@ impl CommandHandler {
|
||||
impl From<NodeInfo> for PeerTableItem {
|
||||
fn from(p: NodeInfo) -> Self {
|
||||
PeerTableItem {
|
||||
ipv4: p.ipv4_addr.clone(),
|
||||
cidr: p.ipv4_addr.clone(),
|
||||
ipv4: Ipv4Inet::from_str(&p.ipv4_addr)
|
||||
.map(|ip| ip.address().to_string())
|
||||
.unwrap_or_default(),
|
||||
hostname: p.hostname.clone(),
|
||||
cost: "Local".to_string(),
|
||||
lat_ms: "-".to_string(),
|
||||
@@ -366,7 +389,7 @@ impl CommandHandler {
|
||||
let mut items: Vec<PeerTableItem> = vec![];
|
||||
let peer_routes = self.list_peer_route_pair().await?;
|
||||
if self.verbose {
|
||||
println!("{:#?}", peer_routes);
|
||||
println!("{}", serde_json::to_string_pretty(&peer_routes)?);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -382,7 +405,7 @@ impl CommandHandler {
|
||||
items.push(p.into());
|
||||
}
|
||||
|
||||
println!("{}", tabled::Table::new(items).with(Style::modern()));
|
||||
print_output(&items, self.output_format)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -404,8 +427,9 @@ impl CommandHandler {
|
||||
.list_foreign_network(BaseController::default(), request)
|
||||
.await?;
|
||||
let network_map = response;
|
||||
if self.verbose {
|
||||
println!("{:#?}", network_map);
|
||||
if self.verbose || *self.output_format == OutputFormat::Json {
|
||||
let json = serde_json::to_string_pretty(&network_map.foreign_networks)?;
|
||||
println!("{}", json);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -445,8 +469,11 @@ impl CommandHandler {
|
||||
let response = client
|
||||
.list_global_foreign_network(BaseController::default(), request)
|
||||
.await?;
|
||||
if self.verbose {
|
||||
println!("{:#?}", response);
|
||||
if self.verbose || *self.output_format == OutputFormat::Json {
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&response.foreign_networks)?
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -464,7 +491,7 @@ impl CommandHandler {
|
||||
}
|
||||
|
||||
async fn handle_route_list(&self) -> Result<(), Error> {
|
||||
#[derive(tabled::Tabled)]
|
||||
#[derive(tabled::Tabled, serde::Serialize)]
|
||||
struct RouteTableItem {
|
||||
ipv4: String,
|
||||
hostname: String,
|
||||
@@ -491,6 +518,23 @@ impl CommandHandler {
|
||||
.await?
|
||||
.node_info
|
||||
.ok_or(anyhow::anyhow!("node info not found"))?;
|
||||
let peer_routes = self.list_peer_route_pair().await?;
|
||||
|
||||
if self.verbose {
|
||||
#[derive(serde::Serialize)]
|
||||
struct VerboseItem {
|
||||
node_info: NodeInfo,
|
||||
peer_routes: Vec<PeerRoutePair>,
|
||||
}
|
||||
println!(
|
||||
"{}",
|
||||
serde_json::to_string_pretty(&VerboseItem {
|
||||
node_info,
|
||||
peer_routes
|
||||
})?
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
items.push(RouteTableItem {
|
||||
ipv4: node_info.ipv4_addr.clone(),
|
||||
@@ -510,7 +554,6 @@ impl CommandHandler {
|
||||
|
||||
version: node_info.version.clone(),
|
||||
});
|
||||
let peer_routes = self.list_peer_route_pair().await?;
|
||||
for p in peer_routes.iter() {
|
||||
let Some(next_hop_pair) = peer_routes.iter().find(|pair| {
|
||||
pair.route.clone().unwrap_or_default().peer_id
|
||||
@@ -634,7 +677,7 @@ impl CommandHandler {
|
||||
}
|
||||
}
|
||||
|
||||
println!("{}", tabled::Table::new(items).with(Style::modern()));
|
||||
print_output(&items, self.output_format)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -645,6 +688,10 @@ impl CommandHandler {
|
||||
let response = client
|
||||
.list_connector(BaseController::default(), request)
|
||||
.await?;
|
||||
if self.verbose || *self.output_format == OutputFormat::Json {
|
||||
println!("{}", serde_json::to_string_pretty(&response.connectors)?);
|
||||
return Ok(());
|
||||
}
|
||||
println!("response: {:#?}", response);
|
||||
Ok(())
|
||||
}
|
||||
@@ -912,6 +959,21 @@ impl Service {
|
||||
}
|
||||
}
|
||||
|
||||
fn print_output<T>(items: &[T], format: &OutputFormat) -> Result<(), Error>
|
||||
where
|
||||
T: tabled::Tabled + serde::Serialize,
|
||||
{
|
||||
match format {
|
||||
OutputFormat::Table => {
|
||||
println!("{}", tabled::Table::new(items).with(Style::modern()));
|
||||
}
|
||||
OutputFormat::Json => {
|
||||
println!("{}", serde_json::to_string_pretty(items)?);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
#[tracing::instrument]
|
||||
async fn main() -> Result<(), Error> {
|
||||
@@ -924,6 +986,7 @@ async fn main() -> Result<(), Error> {
|
||||
let handler = CommandHandler {
|
||||
client: Mutex::new(client),
|
||||
verbose: cli.verbose,
|
||||
output_format: &cli.output_format,
|
||||
};
|
||||
|
||||
match cli.sub_command {
|
||||
@@ -934,12 +997,8 @@ async fn main() -> Result<(), Error> {
|
||||
Some(PeerSubCommand::Remove) => {
|
||||
println!("remove peer");
|
||||
}
|
||||
Some(PeerSubCommand::List(arg)) => {
|
||||
if arg.verbose {
|
||||
println!("{:#?}", handler.list_peer_route_pair().await?);
|
||||
} else {
|
||||
handler.handle_peer_list(&peer_args).await?;
|
||||
}
|
||||
Some(PeerSubCommand::List) => {
|
||||
handler.handle_peer_list().await?;
|
||||
}
|
||||
Some(PeerSubCommand::ListForeign) => {
|
||||
handler.handle_foreign_network_list().await?;
|
||||
@@ -948,7 +1007,7 @@ async fn main() -> Result<(), Error> {
|
||||
handler.handle_global_foreign_network_list().await?;
|
||||
}
|
||||
None => {
|
||||
handler.handle_peer_list(&peer_args).await?;
|
||||
handler.handle_peer_list().await?;
|
||||
}
|
||||
},
|
||||
SubCommand::Connector(conn_args) => match conn_args.sub_command {
|
||||
@@ -975,7 +1034,14 @@ async fn main() -> Result<(), Error> {
|
||||
loop {
|
||||
let ret = collector.get_stun_info();
|
||||
if ret.udp_nat_type != NatType::Unknown as i32 {
|
||||
println!("stun info: {:#?}", ret);
|
||||
if cli.output_format == OutputFormat::Json {
|
||||
match serde_json::to_string_pretty(&ret) {
|
||||
Ok(json) => println!("{}", json),
|
||||
Err(e) => eprintln!("Error serializing to JSON: {}", e),
|
||||
}
|
||||
} else {
|
||||
println!("stun info: {:#?}", ret);
|
||||
}
|
||||
break;
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
@@ -993,27 +1059,45 @@ async fn main() -> Result<(), Error> {
|
||||
)
|
||||
.await?;
|
||||
|
||||
#[derive(tabled::Tabled)]
|
||||
#[derive(tabled::Tabled, serde::Serialize)]
|
||||
struct PeerCenterTableItem {
|
||||
node_id: String,
|
||||
direct_peers: String,
|
||||
#[tabled(rename = "direct_peers")]
|
||||
#[serde(skip_serializing)]
|
||||
direct_peers_str: String,
|
||||
#[tabled(skip)]
|
||||
direct_peers: Vec<DirectPeerItem>,
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize)]
|
||||
struct DirectPeerItem {
|
||||
node_id: String,
|
||||
latency_ms: i32,
|
||||
}
|
||||
|
||||
let mut table_rows = vec![];
|
||||
for (k, v) in resp.global_peer_map.iter() {
|
||||
let node_id = k;
|
||||
let direct_peers = v
|
||||
let direct_peers_strs = v
|
||||
.direct_peers
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,))
|
||||
.collect::<Vec<_>>();
|
||||
let direct_peers: Vec<_> = v.direct_peers
|
||||
.iter()
|
||||
.map(|(k, v)| DirectPeerItem {
|
||||
node_id: k.to_string(),
|
||||
latency_ms: v.latency_ms,
|
||||
})
|
||||
.collect();
|
||||
table_rows.push(PeerCenterTableItem {
|
||||
node_id: node_id.to_string(),
|
||||
direct_peers: direct_peers.join("\n"),
|
||||
direct_peers_str: direct_peers_strs.join("\n"),
|
||||
direct_peers,
|
||||
});
|
||||
}
|
||||
|
||||
println!("{}", tabled::Table::new(table_rows).with(Style::modern()));
|
||||
print_output(&table_rows, &cli.output_format)?;
|
||||
}
|
||||
SubCommand::VpnPortal => {
|
||||
let vpn_portal_client = handler.get_vpn_portal_client().await?;
|
||||
@@ -1045,6 +1129,11 @@ async fn main() -> Result<(), Error> {
|
||||
.ok_or(anyhow::anyhow!("node info not found"))?;
|
||||
match sub_cmd.sub_command {
|
||||
Some(NodeSubCommand::Info) | None => {
|
||||
if cli.verbose || cli.output_format == OutputFormat::Json {
|
||||
println!("{}", serde_json::to_string_pretty(&node_info)?);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let stun_info = node_info.stun_info.clone().unwrap_or_default();
|
||||
let ip_list = node_info.ip_list.clone().unwrap_or_default();
|
||||
|
||||
@@ -1186,7 +1275,12 @@ async fn main() -> Result<(), Error> {
|
||||
.await;
|
||||
entries.extend(ret.unwrap_or_default().entries);
|
||||
|
||||
#[derive(tabled::Tabled)]
|
||||
if cli.verbose {
|
||||
println!("{}", serde_json::to_string_pretty(&entries)?);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
#[derive(tabled::Tabled, serde::Serialize)]
|
||||
struct TableItem {
|
||||
src: String,
|
||||
dst: String,
|
||||
@@ -1215,7 +1309,7 @@ async fn main() -> Result<(), Error> {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
println!("{}", tabled::Table::new(table_rows).with(Style::modern()));
|
||||
print_output(&table_rows, &cli.output_format)?;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -45,11 +45,13 @@ impl IpPacket {
|
||||
// make sure the fragment doesn't overlap with existing fragments
|
||||
for f in &self.fragments {
|
||||
if f.offset <= fragment.offset && fragment.offset < f.offset + f.data.len() as u16 {
|
||||
tracing::trace!("fragment overlap 1, f.offset = {}, fragment.offset = {}, f.data.len() = {}, fragment.data.len() = {}", f.offset, fragment.offset, f.data.len(), fragment.data.len());
|
||||
return;
|
||||
}
|
||||
if fragment.offset <= f.offset
|
||||
&& f.offset < fragment.offset + fragment.data.len() as u16
|
||||
{
|
||||
tracing::trace!("fragment overlap 2, f.offset = {}, fragment.offset = {}, f.data.len() = {}, fragment.data.len() = {}", f.offset, fragment.offset, f.data.len(), fragment.data.len());
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -151,6 +153,13 @@ impl IpReassembler {
|
||||
id,
|
||||
};
|
||||
|
||||
tracing::trace!(
|
||||
?key,
|
||||
"add fragment, offset = {}, total_length = {}",
|
||||
fragment.offset,
|
||||
total_length
|
||||
);
|
||||
|
||||
let mut entry = self.packets.entry(key.clone()).or_insert_with(|| {
|
||||
let packet = IpPacket::new(source, destination);
|
||||
let timestamp = Instant::now();
|
||||
|
||||
@@ -106,8 +106,8 @@ async fn handle_kcp_output(
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct NatDstKcpConnector {
|
||||
kcp_endpoint: Arc<KcpEndpoint>,
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
pub(crate) kcp_endpoint: Arc<KcpEndpoint>,
|
||||
pub(crate) peer_mgr: Arc<PeerManager>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -299,6 +299,10 @@ impl KcpProxySrc {
|
||||
pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstKcpConnector>> {
|
||||
self.tcp_proxy.0.clone()
|
||||
}
|
||||
|
||||
pub fn get_kcp_endpoint(&self) -> Arc<KcpEndpoint> {
|
||||
self.kcp_endpoint.clone()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct KcpProxyDst {
|
||||
|
||||
@@ -1,10 +1,17 @@
|
||||
use std::{
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
sync::{Arc, Weak},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use kcp_sys::{endpoint::KcpEndpoint, stream::KcpStream};
|
||||
|
||||
use crate::{
|
||||
common::{
|
||||
config::PortForwardConfig, global_ctx::GlobalCtxEvent, join_joinset_background,
|
||||
scoped_task::ScopedTask,
|
||||
},
|
||||
gateway::{
|
||||
fast_socks5::{
|
||||
server::{
|
||||
@@ -12,19 +19,22 @@ use crate::{
|
||||
},
|
||||
util::stream::tcp_connect_with_timeout,
|
||||
},
|
||||
tokio_smoltcp::TcpStream,
|
||||
ip_reassembler::IpReassembler,
|
||||
kcp_proxy::NatDstKcpConnector,
|
||||
tokio_smoltcp::{channel_device, Net, NetConfig},
|
||||
},
|
||||
tunnel::packet_def::PacketType,
|
||||
tunnel::packet_def::{PacketType, ZCPacket},
|
||||
};
|
||||
use anyhow::Context;
|
||||
use dashmap::DashSet;
|
||||
use pnet::packet::{ip::IpNextHeaderProtocols, ipv4::Ipv4Packet, tcp::TcpPacket, Packet};
|
||||
use tokio::{
|
||||
io::{AsyncRead, AsyncWrite},
|
||||
select,
|
||||
use dashmap::DashMap;
|
||||
use pnet::packet::{
|
||||
ip::IpNextHeaderProtocols, ipv4::Ipv4Packet, tcp::TcpPacket, udp::UdpPacket, Packet,
|
||||
};
|
||||
use tokio::{
|
||||
io::{AsyncRead, AsyncWrite},
|
||||
net::TcpListener,
|
||||
net::UdpSocket,
|
||||
select,
|
||||
sync::{mpsc, Mutex},
|
||||
task::JoinSet,
|
||||
time::timeout,
|
||||
@@ -32,14 +42,36 @@ use tokio::{
|
||||
|
||||
use crate::{
|
||||
common::{error::Error, global_ctx::GlobalCtx},
|
||||
gateway::tokio_smoltcp::{channel_device, Net, NetConfig},
|
||||
peers::{peer_manager::PeerManager, PeerPacketFilter},
|
||||
tunnel::packet_def::ZCPacket,
|
||||
};
|
||||
|
||||
use super::tcp_proxy::NatDstConnector as _;
|
||||
|
||||
enum SocksUdpSocket {
|
||||
UdpSocket(Arc<tokio::net::UdpSocket>),
|
||||
SmolUdpSocket(super::tokio_smoltcp::UdpSocket),
|
||||
}
|
||||
|
||||
impl SocksUdpSocket {
|
||||
pub async fn send_to(&self, buf: &[u8], addr: SocketAddr) -> Result<usize, std::io::Error> {
|
||||
match self {
|
||||
SocksUdpSocket::UdpSocket(socket) => socket.send_to(buf, addr).await,
|
||||
SocksUdpSocket::SmolUdpSocket(socket) => socket.send_to(buf, addr).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn recv_from(&self, buf: &mut [u8]) -> Result<(usize, SocketAddr), std::io::Error> {
|
||||
match self {
|
||||
SocksUdpSocket::UdpSocket(socket) => socket.recv_from(buf).await,
|
||||
SocksUdpSocket::SmolUdpSocket(socket) => socket.recv_from(buf).await,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum SocksTcpStream {
|
||||
TcpStream(tokio::net::TcpStream),
|
||||
SmolTcpStream(TcpStream),
|
||||
SmolTcpStream(super::tokio_smoltcp::TcpStream),
|
||||
KcpStream(KcpStream),
|
||||
}
|
||||
|
||||
impl AsyncRead for SocksTcpStream {
|
||||
@@ -55,6 +87,9 @@ impl AsyncRead for SocksTcpStream {
|
||||
SocksTcpStream::SmolTcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_read(cx, buf)
|
||||
}
|
||||
SocksTcpStream::KcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -72,6 +107,9 @@ impl AsyncWrite for SocksTcpStream {
|
||||
SocksTcpStream::SmolTcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_write(cx, buf)
|
||||
}
|
||||
SocksTcpStream::KcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_write(cx, buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,6 +122,7 @@ impl AsyncWrite for SocksTcpStream {
|
||||
SocksTcpStream::SmolTcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_flush(cx)
|
||||
}
|
||||
SocksTcpStream::KcpStream(ref mut stream) => std::pin::Pin::new(stream).poll_flush(cx),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,17 +137,121 @@ impl AsyncWrite for SocksTcpStream {
|
||||
SocksTcpStream::SmolTcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_shutdown(cx)
|
||||
}
|
||||
SocksTcpStream::KcpStream(ref mut stream) => {
|
||||
std::pin::Pin::new(stream).poll_shutdown(cx)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum Socks5EntryData {
|
||||
Tcp(TcpListener), // hold a binded socket to hold the tcp port
|
||||
Udp((Arc<SocksUdpSocket>, UdpClientKey)), // hold the socket to send data to dst
|
||||
}
|
||||
|
||||
const UDP_ENTRY: u8 = 1;
|
||||
const TCP_ENTRY: u8 = 2;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
|
||||
struct Socks5Entry {
|
||||
src: SocketAddr,
|
||||
dst: SocketAddr,
|
||||
entry_type: u8,
|
||||
}
|
||||
|
||||
type Socks5EntrySet = Arc<DashSet<Socks5Entry>>;
|
||||
type Socks5EntrySet = Arc<DashMap<Socks5Entry, Socks5EntryData>>;
|
||||
|
||||
struct SmolTcpConnector {
|
||||
net: Arc<Net>,
|
||||
entries: Socks5EntrySet,
|
||||
current_entry: std::sync::Mutex<Option<Socks5Entry>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl AsyncTcpConnector for SmolTcpConnector {
|
||||
type S = SocksTcpStream;
|
||||
|
||||
async fn tcp_connect(
|
||||
&self,
|
||||
addr: SocketAddr,
|
||||
timeout_s: u64,
|
||||
) -> crate::gateway::fast_socks5::Result<SocksTcpStream> {
|
||||
let tmp_listener = TcpListener::bind("0.0.0.0:0").await?;
|
||||
let local_addr = self.net.get_address();
|
||||
let port = tmp_listener.local_addr()?.port();
|
||||
|
||||
let entry = Socks5Entry {
|
||||
src: SocketAddr::new(local_addr, port),
|
||||
dst: addr,
|
||||
entry_type: TCP_ENTRY,
|
||||
};
|
||||
*self.current_entry.lock().unwrap() = Some(entry.clone());
|
||||
self.entries
|
||||
.insert(entry, Socks5EntryData::Tcp(tmp_listener));
|
||||
|
||||
if addr.ip() == local_addr {
|
||||
let modified_addr =
|
||||
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), addr.port());
|
||||
|
||||
Ok(SocksTcpStream::TcpStream(
|
||||
tcp_connect_with_timeout(modified_addr, timeout_s).await?,
|
||||
))
|
||||
} else {
|
||||
let remote_socket = timeout(
|
||||
Duration::from_secs(timeout_s),
|
||||
self.net.tcp_connect(addr, port),
|
||||
)
|
||||
.await
|
||||
.with_context(|| "connect to remote timeout")?;
|
||||
|
||||
Ok(SocksTcpStream::SmolTcpStream(remote_socket.map_err(
|
||||
|e| super::fast_socks5::SocksError::Other(e.into()),
|
||||
)?))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SmolTcpConnector {
|
||||
fn drop(&mut self) {
|
||||
if let Some(entry) = self.current_entry.lock().unwrap().take() {
|
||||
self.entries.remove(&entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct Socks5KcpConnector {
|
||||
kcp_endpoint: Weak<KcpEndpoint>,
|
||||
peer_mgr: Weak<PeerManager>,
|
||||
src_addr: SocketAddr,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl AsyncTcpConnector for Socks5KcpConnector {
|
||||
type S = SocksTcpStream;
|
||||
|
||||
async fn tcp_connect(
|
||||
&self,
|
||||
addr: SocketAddr,
|
||||
_timeout_s: u64,
|
||||
) -> crate::gateway::fast_socks5::Result<SocksTcpStream> {
|
||||
let Some(kcp_endpoint) = self.kcp_endpoint.upgrade() else {
|
||||
return Err(anyhow::anyhow!("kcp endpoint is not ready").into());
|
||||
};
|
||||
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
|
||||
return Err(anyhow::anyhow!("peer mgr is not ready").into());
|
||||
};
|
||||
let c = NatDstKcpConnector {
|
||||
kcp_endpoint,
|
||||
peer_mgr,
|
||||
};
|
||||
println!("connect to kcp endpoint, addr = {:?}", addr);
|
||||
let ret = c
|
||||
.connect(self.src_addr, addr)
|
||||
.await
|
||||
.map_err(|e| super::fast_socks5::SocksError::Other(e.into()))?;
|
||||
Ok(SocksTcpStream::KcpStream(ret))
|
||||
}
|
||||
}
|
||||
|
||||
struct Socks5ServerNet {
|
||||
ipv4_addr: cidr::Ipv4Inet,
|
||||
@@ -130,7 +273,7 @@ impl Socks5ServerNet {
|
||||
) -> Self {
|
||||
let mut forward_tasks = JoinSet::new();
|
||||
let mut cap = smoltcp::phy::DeviceCapabilities::default();
|
||||
cap.max_transmission_unit = 1280;
|
||||
cap.max_transmission_unit = 1284; // 1284 - 20 can be divided by 8 (fragment offset unit)
|
||||
cap.medium = smoltcp::phy::Medium::Ip;
|
||||
let (dev, stack_sink, mut stack_stream) = channel_device::ChannelDevice::new(cap);
|
||||
|
||||
@@ -151,7 +294,8 @@ impl Socks5ServerNet {
|
||||
while let Some(data) = stack_stream.recv().await {
|
||||
tracing::trace!(
|
||||
?data,
|
||||
"receive from smoltcp stack and send to peer mgr packet"
|
||||
"receive from smoltcp stack and send to peer mgr packet, len = {}",
|
||||
data.len()
|
||||
);
|
||||
let Some(ipv4) = Ipv4Packet::new(&data) else {
|
||||
tracing::error!(?data, "smoltcp stack stream get non ipv4 packet");
|
||||
@@ -197,69 +341,14 @@ impl Socks5ServerNet {
|
||||
config.set_skip_auth(false);
|
||||
config.set_allow_no_auth(true);
|
||||
|
||||
struct SmolTcpConnector(
|
||||
Arc<Net>,
|
||||
Socks5EntrySet,
|
||||
std::sync::Mutex<Option<Socks5Entry>>,
|
||||
);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl AsyncTcpConnector for SmolTcpConnector {
|
||||
type S = SocksTcpStream;
|
||||
|
||||
async fn tcp_connect(
|
||||
&self,
|
||||
addr: SocketAddr,
|
||||
timeout_s: u64,
|
||||
) -> crate::gateway::fast_socks5::Result<SocksTcpStream> {
|
||||
let local_addr = self.0.get_address();
|
||||
let port = self.0.get_port();
|
||||
|
||||
let entry = Socks5Entry {
|
||||
src: SocketAddr::new(local_addr, port),
|
||||
dst: addr,
|
||||
};
|
||||
*self.2.lock().unwrap() = Some(entry.clone());
|
||||
self.1.insert(entry);
|
||||
|
||||
if addr.ip() == local_addr {
|
||||
let modified_addr =
|
||||
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), addr.port());
|
||||
|
||||
Ok(SocksTcpStream::TcpStream(
|
||||
tcp_connect_with_timeout(modified_addr, timeout_s).await?,
|
||||
))
|
||||
} else {
|
||||
let remote_socket = timeout(
|
||||
Duration::from_secs(timeout_s),
|
||||
self.0.tcp_connect(addr, port),
|
||||
)
|
||||
.await
|
||||
.with_context(|| "connect to remote timeout")?;
|
||||
|
||||
Ok(SocksTcpStream::SmolTcpStream(remote_socket.map_err(
|
||||
|e| super::fast_socks5::SocksError::Other(e.into()),
|
||||
)?))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for SmolTcpConnector {
|
||||
fn drop(&mut self) {
|
||||
if let Some(entry) = self.2.lock().unwrap().take() {
|
||||
self.1.remove(&entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let socket = Socks5Socket::new(
|
||||
stream,
|
||||
Arc::new(config),
|
||||
SmolTcpConnector(
|
||||
self.smoltcp_net.clone(),
|
||||
self.entries.clone(),
|
||||
std::sync::Mutex::new(None),
|
||||
),
|
||||
SmolTcpConnector {
|
||||
net: self.smoltcp_net.clone(),
|
||||
entries: self.entries.clone(),
|
||||
current_entry: std::sync::Mutex::new(None),
|
||||
},
|
||||
);
|
||||
|
||||
self.forward_tasks.lock().unwrap().spawn(async move {
|
||||
@@ -275,17 +364,38 @@ impl Socks5ServerNet {
|
||||
}
|
||||
}
|
||||
|
||||
struct UdpClientInfo {
|
||||
client_addr: SocketAddr,
|
||||
port_holder_socket: Arc<UdpSocket>,
|
||||
local_addr: SocketAddr,
|
||||
last_active: AtomicCell<Instant>,
|
||||
entries: Socks5EntrySet,
|
||||
entry_key: Socks5Entry,
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
|
||||
struct UdpClientKey {
|
||||
client_addr: SocketAddr,
|
||||
dst_addr: SocketAddr,
|
||||
}
|
||||
|
||||
pub struct Socks5Server {
|
||||
global_ctx: Arc<GlobalCtx>,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
auth: Option<SimpleUserPassword>,
|
||||
|
||||
tasks: Arc<Mutex<JoinSet<()>>>,
|
||||
tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
|
||||
packet_sender: mpsc::Sender<ZCPacket>,
|
||||
packet_recv: Arc<Mutex<mpsc::Receiver<ZCPacket>>>,
|
||||
|
||||
net: Arc<Mutex<Option<Socks5ServerNet>>>,
|
||||
entries: Socks5EntrySet,
|
||||
|
||||
tcp_forward_task: Arc<std::sync::Mutex<JoinSet<()>>>,
|
||||
udp_client_map: Arc<DashMap<UdpClientKey, Arc<UdpClientInfo>>>,
|
||||
udp_forward_task: Arc<DashMap<UdpClientKey, ScopedTask<()>>>,
|
||||
|
||||
kcp_endpoint: Mutex<Option<Weak<KcpEndpoint>>>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -299,22 +409,65 @@ impl PeerPacketFilter for Socks5Server {
|
||||
let payload_bytes = packet.payload();
|
||||
|
||||
let ipv4 = Ipv4Packet::new(payload_bytes).unwrap();
|
||||
if ipv4.get_version() != 4 || ipv4.get_next_level_protocol() != IpNextHeaderProtocols::Tcp {
|
||||
if ipv4.get_version() != 4 {
|
||||
return Some(packet);
|
||||
}
|
||||
|
||||
let tcp_packet = TcpPacket::new(ipv4.payload()).unwrap();
|
||||
let entry = Socks5Entry {
|
||||
dst: SocketAddr::new(ipv4.get_source().into(), tcp_packet.get_source()),
|
||||
src: SocketAddr::new(ipv4.get_destination().into(), tcp_packet.get_destination()),
|
||||
let entry_key = match ipv4.get_next_level_protocol() {
|
||||
IpNextHeaderProtocols::Tcp => {
|
||||
let tcp_packet = TcpPacket::new(ipv4.payload()).unwrap();
|
||||
Socks5Entry {
|
||||
dst: SocketAddr::new(ipv4.get_source().into(), tcp_packet.get_source()),
|
||||
src: SocketAddr::new(
|
||||
ipv4.get_destination().into(),
|
||||
tcp_packet.get_destination(),
|
||||
),
|
||||
entry_type: TCP_ENTRY,
|
||||
}
|
||||
}
|
||||
|
||||
IpNextHeaderProtocols::Udp => {
|
||||
if IpReassembler::is_packet_fragmented(&ipv4) && !self.entries.is_empty() {
|
||||
let ipv4_src: IpAddr = ipv4.get_source().into();
|
||||
// only send to smoltcp if the ipv4 src is in the entries
|
||||
let is_in_entries = self.entries.iter().any(|x| x.key().dst.ip() == ipv4_src);
|
||||
tracing::trace!(
|
||||
?is_in_entries,
|
||||
"ipv4 src = {:?}, check need send both smoltcp and kernel tun",
|
||||
ipv4_src
|
||||
);
|
||||
if is_in_entries {
|
||||
// if the packet is fragmented, no matther what the payload is, need send it to both smoltcp and kernel tun. because
|
||||
// we cannot determine the udp port of the packet.
|
||||
let _ = self.packet_sender.try_send(packet.clone()).ok();
|
||||
}
|
||||
return Some(packet);
|
||||
}
|
||||
|
||||
let udp_packet = UdpPacket::new(ipv4.payload()).unwrap();
|
||||
Socks5Entry {
|
||||
dst: SocketAddr::new(ipv4.get_source().into(), udp_packet.get_source()),
|
||||
src: SocketAddr::new(
|
||||
ipv4.get_destination().into(),
|
||||
udp_packet.get_destination(),
|
||||
),
|
||||
entry_type: UDP_ENTRY,
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Some(packet);
|
||||
}
|
||||
};
|
||||
|
||||
if !self.entries.contains(&entry) {
|
||||
if !self.entries.contains_key(&entry_key) {
|
||||
return Some(packet);
|
||||
}
|
||||
|
||||
tracing::trace!(?entry_key, ?ipv4, "socks5 found entry for packet from peer");
|
||||
|
||||
let _ = self.packet_sender.try_send(packet).ok();
|
||||
return None;
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -330,12 +483,18 @@ impl Socks5Server {
|
||||
peer_manager,
|
||||
auth,
|
||||
|
||||
tasks: Arc::new(Mutex::new(JoinSet::new())),
|
||||
tasks: Arc::new(std::sync::Mutex::new(JoinSet::new())),
|
||||
packet_recv: Arc::new(Mutex::new(packet_recv)),
|
||||
packet_sender,
|
||||
|
||||
net: Arc::new(Mutex::new(None)),
|
||||
entries: Arc::new(DashSet::new()),
|
||||
entries: Arc::new(DashMap::new()),
|
||||
|
||||
tcp_forward_task: Arc::new(std::sync::Mutex::new(JoinSet::new())),
|
||||
udp_client_map: Arc::new(DashMap::new()),
|
||||
udp_forward_task: Arc::new(DashMap::new()),
|
||||
|
||||
kcp_endpoint: Mutex::new(None),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -345,7 +504,9 @@ impl Socks5Server {
|
||||
let peer_manager = self.peer_manager.clone();
|
||||
let packet_recv = self.packet_recv.clone();
|
||||
let entries = self.entries.clone();
|
||||
self.tasks.lock().await.spawn(async move {
|
||||
let tcp_forward_task = self.tcp_forward_task.clone();
|
||||
let udp_client_map = self.udp_client_map.clone();
|
||||
self.tasks.lock().unwrap().spawn(async move {
|
||||
let mut prev_ipv4 = None;
|
||||
loop {
|
||||
let mut event_recv = global_ctx.subscribe();
|
||||
@@ -353,7 +514,10 @@ impl Socks5Server {
|
||||
let cur_ipv4 = global_ctx.get_ipv4();
|
||||
if prev_ipv4 != cur_ipv4 {
|
||||
prev_ipv4 = cur_ipv4;
|
||||
|
||||
entries.clear();
|
||||
tcp_forward_task.lock().unwrap().abort_all();
|
||||
udp_client_map.clear();
|
||||
|
||||
if cur_ipv4.is_none() {
|
||||
let _ = net.lock().await.take();
|
||||
@@ -376,43 +540,356 @@ impl Socks5Server {
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn run(self: &Arc<Self>) -> Result<(), Error> {
|
||||
let Some(proxy_url) = self.global_ctx.config.get_socks5_portal() else {
|
||||
return Ok(());
|
||||
pub async fn run(
|
||||
self: &Arc<Self>,
|
||||
kcp_endpoint: Option<Weak<KcpEndpoint>>,
|
||||
) -> Result<(), Error> {
|
||||
*self.kcp_endpoint.lock().await = kcp_endpoint;
|
||||
let mut need_start = false;
|
||||
if let Some(proxy_url) = self.global_ctx.config.get_socks5_portal() {
|
||||
let bind_addr = format!(
|
||||
"{}:{}",
|
||||
proxy_url.host_str().unwrap(),
|
||||
proxy_url.port().unwrap()
|
||||
);
|
||||
|
||||
let listener = {
|
||||
let _g = self.global_ctx.net_ns.guard();
|
||||
TcpListener::bind(bind_addr.parse::<SocketAddr>().unwrap()).await?
|
||||
};
|
||||
|
||||
let net = self.net.clone();
|
||||
self.tasks.lock().unwrap().spawn(async move {
|
||||
loop {
|
||||
match listener.accept().await {
|
||||
Ok((socket, _addr)) => {
|
||||
tracing::info!("accept a new connection, {:?}", socket);
|
||||
if let Some(net) = net.lock().await.as_ref() {
|
||||
net.handle_tcp_stream(socket);
|
||||
}
|
||||
}
|
||||
Err(err) => tracing::error!("accept error = {:?}", err),
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
join_joinset_background(self.tasks.clone(), "socks5 server".to_string());
|
||||
|
||||
need_start = true;
|
||||
};
|
||||
|
||||
let bind_addr = format!(
|
||||
"{}:{}",
|
||||
proxy_url.host_str().unwrap(),
|
||||
proxy_url.port().unwrap()
|
||||
);
|
||||
for port_forward in self.global_ctx.config.get_port_forwards() {
|
||||
self.add_port_forward(port_forward).await?;
|
||||
need_start = true;
|
||||
}
|
||||
|
||||
if need_start {
|
||||
self.peer_manager
|
||||
.add_packet_process_pipeline(Box::new(self.clone()))
|
||||
.await;
|
||||
|
||||
self.run_net_update_task().await;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_port_forward_connection(
|
||||
mut incoming_socket: tokio::net::TcpStream,
|
||||
connector: Box<dyn AsyncTcpConnector<S = SocksTcpStream> + Send>,
|
||||
dst_addr: SocketAddr,
|
||||
) {
|
||||
let outgoing_socket = match connector.tcp_connect(dst_addr, 10).await {
|
||||
Ok(socket) => socket,
|
||||
Err(e) => {
|
||||
tracing::error!("port forward: failed to connect to destination: {:?}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let mut outgoing_socket = outgoing_socket;
|
||||
match tokio::io::copy_bidirectional(&mut incoming_socket, &mut outgoing_socket).await {
|
||||
Ok((from_client, from_server)) => {
|
||||
tracing::info!(
|
||||
"port forward connection finished: client->server: {} bytes, server->client: {} bytes",
|
||||
from_client, from_server
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("port forward connection error: {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn add_port_forward(&self, cfg: PortForwardConfig) -> Result<(), Error> {
|
||||
match cfg.proto.to_lowercase().as_str() {
|
||||
"tcp" => {
|
||||
self.add_tcp_port_forward(cfg.bind_addr, cfg.dst_addr)
|
||||
.await?;
|
||||
}
|
||||
"udp" => {
|
||||
self.add_udp_port_forward(cfg.bind_addr, cfg.dst_addr)
|
||||
.await?;
|
||||
}
|
||||
_ => {
|
||||
return Err(anyhow::anyhow!(
|
||||
"unsupported protocol: {}, only support udp / tcp",
|
||||
cfg.proto
|
||||
)
|
||||
.into());
|
||||
}
|
||||
}
|
||||
self.global_ctx
|
||||
.issue_event(GlobalCtxEvent::PortForwardAdded(cfg.clone().into()));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn add_tcp_port_forward(
|
||||
&self,
|
||||
bind_addr: SocketAddr,
|
||||
dst_addr: SocketAddr,
|
||||
) -> Result<(), Error> {
|
||||
let listener = {
|
||||
let _g = self.global_ctx.net_ns.guard();
|
||||
TcpListener::bind(bind_addr.parse::<SocketAddr>().unwrap()).await?
|
||||
TcpListener::bind(bind_addr).await?
|
||||
};
|
||||
|
||||
self.peer_manager
|
||||
.add_packet_process_pipeline(Box::new(self.clone()))
|
||||
.await;
|
||||
|
||||
self.run_net_update_task().await;
|
||||
|
||||
let net = self.net.clone();
|
||||
self.tasks.lock().await.spawn(async move {
|
||||
let entries = self.entries.clone();
|
||||
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
|
||||
let forward_tasks = tasks.clone();
|
||||
let kcp_endpoint = self.kcp_endpoint.lock().await.clone();
|
||||
let peer_mgr = Arc::downgrade(&self.peer_manager.clone());
|
||||
|
||||
self.tasks.lock().unwrap().spawn(async move {
|
||||
loop {
|
||||
match listener.accept().await {
|
||||
Ok((socket, _addr)) => {
|
||||
tracing::info!("accept a new connection, {:?}", socket);
|
||||
if let Some(net) = net.lock().await.as_ref() {
|
||||
net.handle_tcp_stream(socket);
|
||||
}
|
||||
let (incoming_socket, addr) = match listener.accept().await {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
tracing::error!("port forward accept error = {:?}", err);
|
||||
continue;
|
||||
}
|
||||
Err(err) => tracing::error!("accept error = {:?}", err),
|
||||
};
|
||||
|
||||
tracing::info!(
|
||||
"port forward: accept new connection from {:?} to {:?}",
|
||||
bind_addr,
|
||||
dst_addr
|
||||
);
|
||||
|
||||
let net_guard = net.lock().await;
|
||||
let Some(net) = net_guard.as_ref() else {
|
||||
tracing::error!("net is not ready");
|
||||
continue;
|
||||
};
|
||||
|
||||
let connector: Box<dyn AsyncTcpConnector<S = SocksTcpStream> + Send> =
|
||||
if kcp_endpoint.is_none() {
|
||||
Box::new(SmolTcpConnector {
|
||||
net: net.smoltcp_net.clone(),
|
||||
entries: entries.clone(),
|
||||
current_entry: std::sync::Mutex::new(None),
|
||||
})
|
||||
} else {
|
||||
let kcp_endpoint = kcp_endpoint.as_ref().unwrap().clone();
|
||||
Box::new(Socks5KcpConnector {
|
||||
kcp_endpoint,
|
||||
peer_mgr: peer_mgr.clone(),
|
||||
src_addr: addr,
|
||||
})
|
||||
};
|
||||
|
||||
forward_tasks
|
||||
.lock()
|
||||
.unwrap()
|
||||
.spawn(Self::handle_port_forward_connection(
|
||||
incoming_socket,
|
||||
connector,
|
||||
dst_addr,
|
||||
));
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(name = "add_udp_port_forward", skip(self))]
|
||||
pub async fn add_udp_port_forward(
|
||||
&self,
|
||||
bind_addr: SocketAddr,
|
||||
dst_addr: SocketAddr,
|
||||
) -> Result<(), Error> {
|
||||
let socket = {
|
||||
let _g = self.global_ctx.net_ns.guard();
|
||||
Arc::new(UdpSocket::bind(bind_addr).await?)
|
||||
};
|
||||
|
||||
let entries = self.entries.clone();
|
||||
let net_ns = self.global_ctx.net_ns.clone();
|
||||
let net = self.net.clone();
|
||||
let udp_client_map = self.udp_client_map.clone();
|
||||
let udp_forward_task = self.udp_forward_task.clone();
|
||||
|
||||
self.tasks.lock().unwrap().spawn(async move {
|
||||
loop {
|
||||
// we set the max buffer size of smoltcp to 8192, so we need to use a buffer size that is less than 8192 here.
|
||||
let mut buf = vec![0u8; 8192];
|
||||
let (len, addr) = match socket.recv_from(&mut buf).await {
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
tracing::error!("udp port forward recv error = {:?}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
tracing::trace!(
|
||||
"udp port forward recv packet from {:?}, len = {}",
|
||||
addr,
|
||||
len
|
||||
);
|
||||
|
||||
let udp_client_key = UdpClientKey {
|
||||
client_addr: addr,
|
||||
dst_addr,
|
||||
};
|
||||
|
||||
let binded_socket = udp_client_map.get(&udp_client_key);
|
||||
let client_info = match binded_socket {
|
||||
Some(s) => s.clone(),
|
||||
None => {
|
||||
let _g = net_ns.guard();
|
||||
// reserve a port so os will not use it to connect to the virtual network
|
||||
let binded_socket = tokio::net::UdpSocket::bind("0.0.0.0:0").await;
|
||||
if binded_socket.is_err() {
|
||||
tracing::error!("udp port forward bind error = {:?}", binded_socket);
|
||||
continue;
|
||||
}
|
||||
let binded_socket = binded_socket.unwrap();
|
||||
let mut local_addr = binded_socket.local_addr().unwrap();
|
||||
let Some(cur_ipv4) = net.lock().await.as_ref().map(|net| net.ipv4_addr) else {
|
||||
continue;
|
||||
};
|
||||
local_addr.set_ip(cur_ipv4.address().into());
|
||||
|
||||
let entry_key = Socks5Entry {
|
||||
src: local_addr,
|
||||
dst: dst_addr,
|
||||
entry_type: UDP_ENTRY,
|
||||
};
|
||||
|
||||
tracing::debug!("udp port forward binded socket = {:?}, entry_key = {:?}", local_addr, entry_key);
|
||||
|
||||
let client_info = Arc::new(UdpClientInfo {
|
||||
client_addr: addr,
|
||||
port_holder_socket: Arc::new(binded_socket),
|
||||
local_addr,
|
||||
last_active: AtomicCell::new(Instant::now()),
|
||||
entries: entries.clone(),
|
||||
entry_key,
|
||||
});
|
||||
udp_client_map.insert(udp_client_key.clone(), client_info.clone());
|
||||
client_info
|
||||
}
|
||||
};
|
||||
|
||||
client_info.last_active.store(Instant::now());
|
||||
|
||||
let entry_data = match entries.get(&client_info.entry_key) {
|
||||
Some(data) => data,
|
||||
None => {
|
||||
let guard = net.lock().await;
|
||||
let Some(net) = guard.as_ref() else {
|
||||
continue;
|
||||
};
|
||||
let local_addr = net.ipv4_addr;
|
||||
let sokcs_udp = if dst_addr.ip() == local_addr.address() {
|
||||
SocksUdpSocket::UdpSocket(client_info.port_holder_socket.clone())
|
||||
} else {
|
||||
tracing::debug!("udp port forward bind new smol udp socket, {:?}", local_addr);
|
||||
SocksUdpSocket::SmolUdpSocket(
|
||||
net.smoltcp_net
|
||||
.udp_bind(SocketAddr::new(
|
||||
IpAddr::V4(local_addr.address()),
|
||||
client_info.local_addr.port(),
|
||||
))
|
||||
.await
|
||||
.unwrap(),
|
||||
)
|
||||
};
|
||||
let socks_udp = Arc::new(sokcs_udp);
|
||||
entries.insert(
|
||||
client_info.entry_key.clone(),
|
||||
Socks5EntryData::Udp((socks_udp.clone(), udp_client_key.clone())),
|
||||
);
|
||||
|
||||
let socks = socket.clone();
|
||||
let client_addr = addr;
|
||||
udp_forward_task.insert(
|
||||
udp_client_key.clone(),
|
||||
ScopedTask::from(tokio::spawn(async move {
|
||||
loop {
|
||||
let mut buf = vec![0u8; 8192];
|
||||
match socks_udp.recv_from(&mut buf).await {
|
||||
Ok((len, dst_addr)) => {
|
||||
tracing::trace!(
|
||||
"udp port forward recv response packet from {:?}, len = {}, client_addr = {:?}",
|
||||
dst_addr,
|
||||
len,
|
||||
client_addr
|
||||
);
|
||||
if let Err(e) = socks.send_to(&buf[..len], client_addr).await {
|
||||
tracing::error!("udp forward send error = {:?}", e);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("udp forward recv error = {:?}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
})),
|
||||
);
|
||||
|
||||
entries.get(&client_info.entry_key).unwrap()
|
||||
}
|
||||
};
|
||||
|
||||
let s = match entry_data.value() {
|
||||
Socks5EntryData::Udp((s, _)) => s.clone(),
|
||||
_ => {
|
||||
panic!("udp entry data is not udp entry data");
|
||||
}
|
||||
};
|
||||
drop(entry_data);
|
||||
|
||||
if let Err(e) = s.send_to(&buf[..len], dst_addr).await {
|
||||
tracing::error!(?dst_addr, ?len, "udp port forward send error = {:?}", e);
|
||||
} else {
|
||||
tracing::trace!(?dst_addr, ?len, "udp port forward send packet success");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// clean up task
|
||||
let udp_client_map = self.udp_client_map.clone();
|
||||
let udp_forward_task = self.udp_forward_task.clone();
|
||||
let entries = self.entries.clone();
|
||||
self.tasks.lock().unwrap().spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(Duration::from_secs(30)).await;
|
||||
let now = Instant::now();
|
||||
udp_client_map.retain(|_, client_info| {
|
||||
now.duration_since(client_info.last_active.load()).as_secs() < 600
|
||||
});
|
||||
udp_forward_task.retain(|k, _| udp_client_map.contains_key(&k));
|
||||
entries.retain(|_, data| match data {
|
||||
Socks5EntryData::Udp((_, udp_client_key)) => {
|
||||
udp_client_map.contains_key(&udp_client_key)
|
||||
}
|
||||
_ => true,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -351,9 +351,10 @@ impl<C: NatDstConnector> PeerPacketFilter for TcpProxy<C> {
|
||||
#[async_trait::async_trait]
|
||||
impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
|
||||
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
|
||||
let Some(my_ipv4) = self.get_local_ip() else {
|
||||
let Some(my_ipv4_inet) = self.get_local_inet() else {
|
||||
return false;
|
||||
};
|
||||
let my_ipv4 = my_ipv4_inet.address();
|
||||
|
||||
let data = zc_packet.payload();
|
||||
let ip_packet = Ipv4Packet::new(data).unwrap();
|
||||
@@ -377,7 +378,7 @@ impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
|
||||
|
||||
// for kcp proxy, the src ip of nat entry will be converted from my ip to fake ip
|
||||
// here we need to convert it back
|
||||
if !self.is_smoltcp_enabled() && dst_addr.ip() == Self::get_fake_local_ipv4(my_ipv4) {
|
||||
if !self.is_smoltcp_enabled() && dst_addr.ip() == Self::get_fake_local_ipv4(&my_ipv4_inet) {
|
||||
dst_addr.set_ip(IpAddr::V4(my_ipv4));
|
||||
need_transform_dst = true;
|
||||
}
|
||||
@@ -620,13 +621,15 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
continue;
|
||||
};
|
||||
|
||||
let my_ip = global_ctx
|
||||
.get_ipv4()
|
||||
let my_ip_inet = global_ctx.get_ipv4();
|
||||
let my_ip = my_ip_inet
|
||||
.as_ref()
|
||||
.map(Ipv4Inet::address)
|
||||
.unwrap_or(Ipv4Addr::UNSPECIFIED);
|
||||
|
||||
if socket_addr.ip() == Self::get_fake_local_ipv4(my_ip) {
|
||||
if my_ip_inet.is_some()
|
||||
&& socket_addr.ip() == Self::get_fake_local_ipv4(&my_ip_inet.unwrap())
|
||||
{
|
||||
socket_addr.set_ip(IpAddr::V4(my_ip));
|
||||
}
|
||||
|
||||
@@ -768,13 +771,14 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
}
|
||||
|
||||
pub fn get_local_ip(&self) -> Option<Ipv4Addr> {
|
||||
self.get_local_inet().map(|inet| inet.address())
|
||||
}
|
||||
|
||||
pub fn get_local_inet(&self) -> Option<Ipv4Inet> {
|
||||
if self.is_smoltcp_enabled() {
|
||||
Some(Ipv4Addr::new(192, 88, 99, 254))
|
||||
Some(Ipv4Inet::new(Ipv4Addr::new(192, 88, 99, 254), 24).unwrap())
|
||||
} else {
|
||||
self.global_ctx
|
||||
.get_ipv4()
|
||||
.as_ref()
|
||||
.map(cidr::Ipv4Inet::address)
|
||||
self.global_ctx.get_ipv4().as_ref().cloned()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -787,9 +791,8 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn get_fake_local_ipv4(local_ip: Ipv4Addr) -> Ipv4Addr {
|
||||
let octets = local_ip.octets();
|
||||
Ipv4Addr::new(octets[0], octets[1], octets[2], 0)
|
||||
pub fn get_fake_local_ipv4(local_ip: &Ipv4Inet) -> Ipv4Addr {
|
||||
local_ip.first_address()
|
||||
}
|
||||
|
||||
async fn try_handle_peer_packet(&self, packet: &mut ZCPacket) -> Option<()> {
|
||||
@@ -800,7 +803,8 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
return None;
|
||||
}
|
||||
|
||||
let ipv4_addr = self.get_local_ip()?;
|
||||
let ipv4_inet = self.get_local_inet()?;
|
||||
let ipv4_addr = ipv4_inet.address();
|
||||
let hdr = packet.peer_manager_header().unwrap().clone();
|
||||
|
||||
if hdr.packet_type != PacketType::Data as u8 || hdr.is_no_proxy() {
|
||||
@@ -849,7 +853,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
let mut ip_packet = MutableIpv4Packet::new(payload_bytes).unwrap();
|
||||
if !self.is_smoltcp_enabled() && source_ip == ipv4_addr {
|
||||
// modify the source so the response packet can be handled by tun device
|
||||
ip_packet.set_source(Self::get_fake_local_ipv4(ipv4_addr));
|
||||
ip_packet.set_source(Self::get_fake_local_ipv4(&ipv4_inet));
|
||||
}
|
||||
ip_packet.set_destination(ipv4_addr);
|
||||
let source = ip_packet.get_source();
|
||||
|
||||
@@ -20,7 +20,7 @@ use smoltcp::{
|
||||
time::{Duration, Instant},
|
||||
wire::{HardwareAddress, IpAddress, IpCidr},
|
||||
};
|
||||
pub use socket::{TcpListener, TcpStream};
|
||||
pub use socket::{TcpListener, TcpStream, UdpSocket};
|
||||
pub use socket_allocator::BufferSize;
|
||||
use tokio::sync::Notify;
|
||||
|
||||
@@ -158,6 +158,13 @@ impl Net {
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// This function will create a new UDP socket and attempt to bind it to the `addr` provided.
|
||||
pub async fn udp_bind(&self, addr: SocketAddr) -> io::Result<UdpSocket> {
|
||||
let addr = self.set_address(addr);
|
||||
UdpSocket::new(self.reactor.clone(), addr.into()).await
|
||||
}
|
||||
|
||||
fn set_address(&self, mut addr: SocketAddr) -> SocketAddr {
|
||||
if addr.ip().is_unspecified() {
|
||||
addr.set_ip(match self.ip_addr.address() {
|
||||
|
||||
@@ -2,6 +2,7 @@ use super::{reactor::Reactor, socket_allocator::SocketHandle};
|
||||
use futures::future::{self, poll_fn};
|
||||
use futures::{ready, Stream};
|
||||
pub use smoltcp::socket::tcp;
|
||||
use smoltcp::socket::udp;
|
||||
use smoltcp::wire::{IpAddress, IpEndpoint};
|
||||
use std::mem::replace;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
@@ -247,3 +248,86 @@ impl AsyncWrite for TcpStream {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
/// A UDP socket.
|
||||
pub struct UdpSocket {
|
||||
handle: SocketHandle,
|
||||
reactor: Arc<Reactor>,
|
||||
local_addr: SocketAddr,
|
||||
}
|
||||
|
||||
impl UdpSocket {
|
||||
pub(super) async fn new(
|
||||
reactor: Arc<Reactor>,
|
||||
local_endpoint: IpEndpoint,
|
||||
) -> io::Result<UdpSocket> {
|
||||
let handle = reactor.socket_allocator().new_udp_socket();
|
||||
{
|
||||
let mut socket = reactor.get_socket::<udp::Socket>(*handle);
|
||||
socket.bind(local_endpoint).map_err(map_err)?;
|
||||
}
|
||||
|
||||
let local_addr = ep2sa(&local_endpoint);
|
||||
|
||||
Ok(UdpSocket {
|
||||
handle,
|
||||
reactor,
|
||||
local_addr,
|
||||
})
|
||||
}
|
||||
/// Note that on multiple calls to a poll_* method in the send direction, only the Waker from the Context passed to the most recent call will be scheduled to receive a wakeup.
|
||||
pub fn poll_send_to(
|
||||
&self,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &[u8],
|
||||
target: SocketAddr,
|
||||
) -> Poll<io::Result<usize>> {
|
||||
let mut socket = self.reactor.get_socket::<udp::Socket>(*self.handle);
|
||||
let target_ip: IpEndpoint = target.into();
|
||||
|
||||
match socket.send_slice(buf, target_ip) {
|
||||
// the buffer is full
|
||||
Err(udp::SendError::BufferFull) => {}
|
||||
r => {
|
||||
r.map_err(map_err)?;
|
||||
self.reactor.notify();
|
||||
return Poll::Ready(Ok(buf.len()));
|
||||
}
|
||||
}
|
||||
|
||||
socket.register_send_waker(cx.waker());
|
||||
Poll::Pending
|
||||
}
|
||||
/// See note on `poll_send_to`
|
||||
pub async fn send_to(&self, buf: &[u8], target: SocketAddr) -> io::Result<usize> {
|
||||
poll_fn(|cx| self.poll_send_to(cx, buf, target)).await
|
||||
}
|
||||
/// Note that on multiple calls to a poll_* method in the recv direction, only the Waker from the Context passed to the most recent call will be scheduled to receive a wakeup.
|
||||
pub fn poll_recv_from(
|
||||
&self,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut [u8],
|
||||
) -> Poll<io::Result<(usize, SocketAddr)>> {
|
||||
let mut socket = self.reactor.get_socket::<udp::Socket>(*self.handle);
|
||||
|
||||
match socket.recv_slice(buf) {
|
||||
// the buffer is empty
|
||||
Err(udp::RecvError::Exhausted) => {}
|
||||
r => {
|
||||
let (size, metadata) = r.map_err(map_err)?;
|
||||
self.reactor.notify();
|
||||
return Poll::Ready(Ok((size, ep2sa(&metadata.endpoint))));
|
||||
}
|
||||
}
|
||||
|
||||
socket.register_recv_waker(cx.waker());
|
||||
Poll::Pending
|
||||
}
|
||||
/// See note on `poll_recv_from`
|
||||
pub async fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SocketAddr)> {
|
||||
poll_fn(|cx| self.poll_recv_from(cx, buf)).await
|
||||
}
|
||||
pub fn local_addr(&self) -> io::Result<SocketAddr> {
|
||||
Ok(self.local_addr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use parking_lot::Mutex;
|
||||
use smoltcp::{
|
||||
iface::{SocketHandle as InnerSocketHandle, SocketSet},
|
||||
socket::tcp,
|
||||
socket::{tcp, udp},
|
||||
time::Duration,
|
||||
};
|
||||
use std::{
|
||||
@@ -14,6 +14,11 @@ use std::{
|
||||
pub struct BufferSize {
|
||||
pub tcp_rx_size: usize,
|
||||
pub tcp_tx_size: usize,
|
||||
|
||||
pub udp_rx_size: usize,
|
||||
pub udp_tx_size: usize,
|
||||
pub udp_rx_meta_size: usize,
|
||||
pub udp_tx_meta_size: usize,
|
||||
}
|
||||
|
||||
impl Default for BufferSize {
|
||||
@@ -21,6 +26,11 @@ impl Default for BufferSize {
|
||||
BufferSize {
|
||||
tcp_rx_size: 8192,
|
||||
tcp_tx_size: 8192,
|
||||
|
||||
udp_rx_size: 8192,
|
||||
udp_tx_size: 8192,
|
||||
udp_rx_meta_size: 32,
|
||||
udp_tx_meta_size: 32,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -59,6 +69,26 @@ impl SocketAlloctor {
|
||||
|
||||
tcp
|
||||
}
|
||||
|
||||
pub fn new_udp_socket(&self) -> SocketHandle {
|
||||
let mut set = self.sockets.lock();
|
||||
let handle = set.add(self.alloc_udp_socket());
|
||||
SocketHandle::new(handle, self.sockets.clone())
|
||||
}
|
||||
|
||||
fn alloc_udp_socket(&self) -> udp::Socket<'static> {
|
||||
let rx_buffer = udp::PacketBuffer::new(
|
||||
vec![udp::PacketMetadata::EMPTY; self.buffer_size.udp_rx_meta_size],
|
||||
vec![0; self.buffer_size.udp_rx_size],
|
||||
);
|
||||
let tx_buffer = udp::PacketBuffer::new(
|
||||
vec![udp::PacketMetadata::EMPTY; self.buffer_size.udp_tx_meta_size],
|
||||
vec![0; self.buffer_size.udp_tx_size],
|
||||
);
|
||||
let udp = udp::Socket::new(rx_buffer, tx_buffer);
|
||||
|
||||
udp
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SocketHandle(InnerSocketHandle, SharedSocketSet);
|
||||
|
||||
104
easytier/src/instance/dns_server/client_instance.rs
Normal file
104
easytier/src/instance/dns_server/client_instance.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
use crate::{
|
||||
peers::peer_manager::PeerManager,
|
||||
proto::{
|
||||
cli::Route,
|
||||
common::Void,
|
||||
magic_dns::{
|
||||
HandshakeRequest, MagicDnsServerRpc, MagicDnsServerRpcClientFactory,
|
||||
UpdateDnsRecordRequest,
|
||||
},
|
||||
rpc_impl::standalone::StandAloneClient,
|
||||
rpc_types::controller::BaseController,
|
||||
},
|
||||
tunnel::tcp::TcpTunnelConnector,
|
||||
};
|
||||
|
||||
use super::{DEFAULT_ET_DNS_ZONE, MAGIC_DNS_INSTANCE_ADDR};
|
||||
|
||||
pub struct MagicDnsClientInstance {
|
||||
rpc_client: StandAloneClient<TcpTunnelConnector>,
|
||||
rpc_stub: Option<Box<dyn MagicDnsServerRpc<Controller = BaseController> + Send>>,
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
|
||||
impl MagicDnsClientInstance {
|
||||
pub async fn new(peer_mgr: Arc<PeerManager>) -> Result<Self, anyhow::Error> {
|
||||
let tcp_connector = TcpTunnelConnector::new(MAGIC_DNS_INSTANCE_ADDR.parse().unwrap());
|
||||
let mut rpc_client = StandAloneClient::new(tcp_connector);
|
||||
let rpc_stub = rpc_client
|
||||
.scoped_client::<MagicDnsServerRpcClientFactory<BaseController>>("".to_string())
|
||||
.await?;
|
||||
Ok(MagicDnsClientInstance {
|
||||
rpc_client,
|
||||
rpc_stub: Some(rpc_stub),
|
||||
peer_mgr,
|
||||
tasks: JoinSet::new(),
|
||||
})
|
||||
}
|
||||
|
||||
async fn update_dns_task(
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
rpc_stub: Box<dyn MagicDnsServerRpc<Controller = BaseController> + Send>,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let mut prev_last_update = None;
|
||||
rpc_stub
|
||||
.handshake(BaseController::default(), HandshakeRequest::default())
|
||||
.await?;
|
||||
loop {
|
||||
rpc_stub
|
||||
.heartbeat(BaseController::default(), Void::default())
|
||||
.await?;
|
||||
|
||||
let last_update = peer_mgr.get_route_peer_info_last_update_time().await;
|
||||
if Some(last_update) == prev_last_update {
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
continue;
|
||||
}
|
||||
prev_last_update = Some(last_update);
|
||||
let mut routes = peer_mgr.list_routes().await;
|
||||
// add self as a route
|
||||
let ctx = peer_mgr.get_global_ctx();
|
||||
routes.push(Route {
|
||||
hostname: ctx.get_hostname(),
|
||||
ipv4_addr: ctx.get_ipv4().map(Into::into),
|
||||
..Default::default()
|
||||
});
|
||||
let req = UpdateDnsRecordRequest {
|
||||
routes,
|
||||
zone: DEFAULT_ET_DNS_ZONE.to_string(),
|
||||
};
|
||||
tracing::debug!(
|
||||
"MagicDnsClientInstance::update_dns_task: update dns records: {:?}",
|
||||
req
|
||||
);
|
||||
rpc_stub
|
||||
.update_dns_record(BaseController::default(), req)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_and_wait(&mut self) {
|
||||
let rpc_stub = self.rpc_stub.take().unwrap();
|
||||
let peer_mgr = self.peer_mgr.clone();
|
||||
self.tasks.spawn(async move {
|
||||
let ret = Self::update_dns_task(peer_mgr, rpc_stub).await;
|
||||
if let Err(e) = ret {
|
||||
tracing::error!("MagicDnsServerInstanceData::run_and_wait: {:?}", e);
|
||||
}
|
||||
});
|
||||
|
||||
tokio::select! {
|
||||
_ = self.tasks.join_next() => {
|
||||
tracing::warn!("MagicDnsServerInstanceData::run_and_wait: dns record update task exited");
|
||||
}
|
||||
_ = self.rpc_client.wait() => {
|
||||
tracing::warn!("MagicDnsServerInstanceData::run_and_wait: rpc client exited");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
193
easytier/src/instance/dns_server/config.rs
Normal file
193
easytier/src/instance/dns_server/config.rs
Normal file
@@ -0,0 +1,193 @@
|
||||
use hickory_proto::rr;
|
||||
use hickory_proto::rr::RData;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::str::FromStr;
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, derive_builder::Builder)]
|
||||
pub struct RunConfig {
|
||||
general: GeneralConfig,
|
||||
|
||||
#[builder(default = HashMap::new())]
|
||||
zones: Zone,
|
||||
|
||||
#[builder(default = Vec::new())]
|
||||
#[serde(default)]
|
||||
excluded_forward_nameservers: Vec<IpAddr>,
|
||||
}
|
||||
|
||||
impl RunConfig {
|
||||
pub fn general(&self) -> &GeneralConfig {
|
||||
&self.general
|
||||
}
|
||||
|
||||
pub fn zones(&self) -> &Zone {
|
||||
&self.zones
|
||||
}
|
||||
|
||||
pub fn excluded_forward_nameservers(&self) -> &Vec<IpAddr> {
|
||||
&self.excluded_forward_nameservers
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, derive_builder::Builder)]
|
||||
pub struct GeneralConfig {
|
||||
#[builder(setter(into, strip_option), default = None)]
|
||||
listen_tcp: Option<String>,
|
||||
|
||||
#[builder(setter(into, strip_option), default = None)]
|
||||
listen_udp: Option<String>,
|
||||
}
|
||||
|
||||
impl GeneralConfig {
|
||||
pub fn listen_tcp(&self) -> &Option<String> {
|
||||
&self.listen_tcp
|
||||
}
|
||||
|
||||
pub fn listen_udp(&self) -> &Option<String> {
|
||||
&self.listen_udp
|
||||
}
|
||||
}
|
||||
|
||||
pub type Zone = HashMap<String, Vec<Record>>; // domain -> records
|
||||
|
||||
pub type RecordType = rr::RecordType;
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, derive_builder::Builder)]
|
||||
pub struct Record {
|
||||
#[serde(rename = "type")]
|
||||
rr_type: RecordType,
|
||||
|
||||
name: String,
|
||||
value: String,
|
||||
|
||||
#[serde(with = "humantime_serde")]
|
||||
ttl: Duration,
|
||||
}
|
||||
|
||||
impl Record {
|
||||
fn name(&self) -> anyhow::Result<rr::Name> {
|
||||
let name = rr::Name::from_str(self.name.as_str())?;
|
||||
Ok(name)
|
||||
}
|
||||
|
||||
fn rr_type(&self) -> rr::RecordType {
|
||||
self.rr_type.clone().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Record> for rr::Record {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(value: Record) -> Result<Self, Self::Error> {
|
||||
let r: rr::Record = (&value).try_into()?;
|
||||
Ok(r)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&Record> for rr::Record {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(value: &Record) -> Result<Self, Self::Error> {
|
||||
let name = value.name()?;
|
||||
let mut record = Self::update0(name, value.ttl.as_secs() as u32, value.rr_type());
|
||||
record.set_dns_class(rr::DNSClass::IN);
|
||||
match value.rr_type {
|
||||
RecordType::A => {
|
||||
let addr: Ipv4Addr = value.value.parse()?;
|
||||
record.set_data(RData::A(rr::rdata::a::A(addr)));
|
||||
}
|
||||
RecordType::SOA => {
|
||||
let soa = value.value.split_whitespace().collect::<Vec<_>>();
|
||||
if soa.len() != 7 {
|
||||
return Err(anyhow::anyhow!("invalid SOA record"));
|
||||
}
|
||||
let mname = rr::Name::from_str(soa[0])?;
|
||||
let rname = rr::Name::from_str(soa[1])?;
|
||||
let serial: u32 = soa[2].parse()?;
|
||||
let refresh: u32 = soa[3].parse()?;
|
||||
let retry: u32 = soa[4].parse()?;
|
||||
let expire: u32 = soa[5].parse()?;
|
||||
let minimum: u32 = soa[6].parse()?;
|
||||
record.set_data(RData::SOA(rr::rdata::soa::SOA::new(
|
||||
mname,
|
||||
rname,
|
||||
serial,
|
||||
refresh.try_into().unwrap(),
|
||||
retry.try_into().unwrap(),
|
||||
expire.try_into().unwrap(),
|
||||
minimum,
|
||||
)));
|
||||
}
|
||||
_ => todo!(),
|
||||
}
|
||||
Ok(record)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use anyhow::anyhow;
|
||||
|
||||
#[tokio::test]
|
||||
async fn it_works() -> anyhow::Result<()> {
|
||||
let text = r#"
|
||||
[general]
|
||||
listen_tcp = "127.0.0.1:5300"
|
||||
listen_udp = "127.0.0.1:5353"
|
||||
|
||||
[[zones."et.internal"]]
|
||||
type = "A"
|
||||
name = "www"
|
||||
value = "123.123.123.123"
|
||||
ttl = "60s"
|
||||
|
||||
[[zones."et.top"]]
|
||||
type = "A"
|
||||
name = "@"
|
||||
value = "100.100.100.100"
|
||||
ttl = "61s"
|
||||
|
||||
"#;
|
||||
|
||||
let config = toml::from_str::<RunConfig>(text)?;
|
||||
assert_eq!(
|
||||
config.general.listen_tcp().clone().unwrap(),
|
||||
"127.0.0.1:5300"
|
||||
);
|
||||
assert_eq!(
|
||||
config.general.listen_udp().clone().unwrap(),
|
||||
"127.0.0.1:5353"
|
||||
);
|
||||
assert_eq!(config.zones.len(), 2);
|
||||
|
||||
let (domain, records) = config
|
||||
.zones
|
||||
.get_key_value("et.internal")
|
||||
.map_or(Err(anyhow!("parse error")), |x| Ok(x))?;
|
||||
assert_eq!(domain, "et.internal");
|
||||
assert_eq!(records.len(), 1);
|
||||
let record = &records[0];
|
||||
assert_eq!(record.rr_type, RecordType::A);
|
||||
assert_eq!(record.name, "www");
|
||||
assert_eq!(record.value, "123.123.123.123");
|
||||
assert_eq!(record.ttl.as_secs(), 60);
|
||||
|
||||
let (domain, records) = config
|
||||
.zones
|
||||
.get_key_value("et.top")
|
||||
.map_or(Err(anyhow!("parse error")), |x| Ok(x))?;
|
||||
assert_eq!(domain, "et.top");
|
||||
assert_eq!(records.len(), 1);
|
||||
let record = &records[0];
|
||||
assert_eq!(record.rr_type, RecordType::A);
|
||||
assert_eq!(record.name, "@");
|
||||
assert_eq!(record.value, "100.100.100.100");
|
||||
assert_eq!(record.ttl.as_secs(), 61);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
15
easytier/src/instance/dns_server/mod.rs
Normal file
15
easytier/src/instance/dns_server/mod.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
// This module is copy and modified from https://github.com/fanyang89/libdns
|
||||
pub(crate) mod config;
|
||||
pub(crate) mod server;
|
||||
|
||||
pub mod client_instance;
|
||||
pub mod runner;
|
||||
pub mod server_instance;
|
||||
pub mod system_config;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub static MAGIC_DNS_INSTANCE_ADDR: &str = "tcp://127.0.0.1:49813";
|
||||
pub static MAGIC_DNS_FAKE_IP: &str = "100.100.100.101";
|
||||
pub static DEFAULT_ET_DNS_ZONE: &str = "et.net.";
|
||||
93
easytier/src/instance/dns_server/runner.rs
Normal file
93
easytier/src/instance/dns_server/runner.rs
Normal file
@@ -0,0 +1,93 @@
|
||||
use cidr::Ipv4Inet;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::peers::peer_manager::PeerManager;
|
||||
use std::{net::Ipv4Addr, sync::Arc, time::Duration};
|
||||
|
||||
use super::{client_instance::MagicDnsClientInstance, server_instance::MagicDnsServerInstance};
|
||||
|
||||
static DEFAULT_ET_DNS_ZONE: &str = "et.net.";
|
||||
|
||||
pub struct DnsRunner {
|
||||
client: Option<MagicDnsClientInstance>,
|
||||
server: Option<MagicDnsServerInstance>,
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
tun_dev: Option<String>,
|
||||
tun_inet: Ipv4Inet,
|
||||
fake_ip: Ipv4Addr,
|
||||
}
|
||||
|
||||
impl DnsRunner {
|
||||
pub fn new(
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
tun_dev: Option<String>,
|
||||
tun_inet: Ipv4Inet,
|
||||
fake_ip: Ipv4Addr,
|
||||
) -> Self {
|
||||
Self {
|
||||
client: None,
|
||||
server: None,
|
||||
peer_mgr,
|
||||
tun_dev,
|
||||
tun_inet,
|
||||
fake_ip,
|
||||
}
|
||||
}
|
||||
|
||||
async fn clean_env(&mut self) {
|
||||
if let Some(server) = self.server.take() {
|
||||
server.clean_env().await;
|
||||
}
|
||||
self.client.take();
|
||||
}
|
||||
|
||||
async fn run_once(&mut self) -> anyhow::Result<()> {
|
||||
// try server first
|
||||
match MagicDnsServerInstance::new(
|
||||
self.peer_mgr.clone(),
|
||||
self.tun_dev.clone(),
|
||||
self.tun_inet,
|
||||
self.fake_ip,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(server) => {
|
||||
self.server = Some(server);
|
||||
tracing::info!("DnsRunner::run_once: server started");
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("DnsRunner::run_once: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// every runner must run a client
|
||||
let client = MagicDnsClientInstance::new(self.peer_mgr.clone()).await?;
|
||||
self.client = Some(client);
|
||||
self.client.as_mut().unwrap().run_and_wait().await;
|
||||
|
||||
return Err(anyhow::anyhow!("Client instance exit"));
|
||||
}
|
||||
|
||||
pub async fn run(&mut self, canel_token: CancellationToken) {
|
||||
loop {
|
||||
tracing::info!("DnsRunner::run: start");
|
||||
tokio::select! {
|
||||
_ = canel_token.cancelled() => {
|
||||
self.clean_env().await;
|
||||
tracing::info!("DnsRunner::run: cancelled");
|
||||
return;
|
||||
}
|
||||
|
||||
ret = self.run_once() => {
|
||||
self.clean_env().await;
|
||||
if let Err(e) = ret {
|
||||
tracing::error!("DnsRunner::run: {:?}", e);
|
||||
} else {
|
||||
tracing::info!("DnsRunner::run: unexpected exit, server may be down");
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
338
easytier/src/instance/dns_server/server.rs
Normal file
338
easytier/src/instance/dns_server/server.rs
Normal file
@@ -0,0 +1,338 @@
|
||||
use anyhow::{Context, Result};
|
||||
use hickory_proto::op::Edns;
|
||||
use hickory_proto::rr;
|
||||
use hickory_proto::rr::LowerName;
|
||||
use hickory_resolver::config::ResolverOpts;
|
||||
use hickory_resolver::name_server::TokioConnectionProvider;
|
||||
use hickory_resolver::system_conf::read_system_conf;
|
||||
use hickory_server::authority::{AuthorityObject, Catalog, ZoneType};
|
||||
use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
|
||||
use hickory_server::store::forwarder::ForwardConfig;
|
||||
use hickory_server::store::{forwarder::ForwardAuthority, in_memory::InMemoryAuthority};
|
||||
use hickory_server::ServerFuture;
|
||||
use std::io;
|
||||
use std::net::SocketAddr;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::net::{TcpListener, UdpSocket};
|
||||
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
use crate::common::dns::get_default_resolver_config;
|
||||
|
||||
use super::config::{GeneralConfig, Record, RunConfig};
|
||||
|
||||
pub struct Server {
|
||||
server: ServerFuture<CatalogRequestHandler>,
|
||||
catalog: Arc<RwLock<Catalog>>,
|
||||
general_config: GeneralConfig,
|
||||
udp_local_addr: Option<SocketAddr>,
|
||||
tcp_local_addr: Option<SocketAddr>,
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
|
||||
struct CatalogRequestHandler {
|
||||
catalog: Arc<RwLock<Catalog>>,
|
||||
}
|
||||
|
||||
impl CatalogRequestHandler {
|
||||
fn new(catalog: Arc<RwLock<Catalog>>) -> CatalogRequestHandler {
|
||||
// let system_conf = read_system_conf();
|
||||
// let recursor = match system_conf {
|
||||
// Ok((conf, _)) => RecursorBuilder::default().build(conf),
|
||||
// Err(_) => RecursorBuilder::default().build(get_default_resolver_config()),
|
||||
// }
|
||||
// // policy is security unware, this will never return an error
|
||||
// .unwrap();
|
||||
|
||||
Self { catalog }
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RequestHandler for CatalogRequestHandler {
|
||||
async fn handle_request<R: ResponseHandler>(
|
||||
&self,
|
||||
request: &Request,
|
||||
response_handle: R,
|
||||
) -> ResponseInfo {
|
||||
self.catalog
|
||||
.read()
|
||||
.await
|
||||
.handle_request(request, response_handle)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_authority(domain: &str, records: &[Record]) -> Result<InMemoryAuthority> {
|
||||
let zone = rr::Name::from_str(domain)?;
|
||||
let mut authority = InMemoryAuthority::empty(zone.clone(), ZoneType::Primary, false);
|
||||
for record in records.iter() {
|
||||
let r = record.try_into()?;
|
||||
authority.upsert_mut(r, 0);
|
||||
}
|
||||
Ok(authority)
|
||||
}
|
||||
|
||||
impl Server {
|
||||
pub fn new(config: RunConfig) -> Self {
|
||||
Self::try_new(config).unwrap()
|
||||
}
|
||||
|
||||
fn try_new(config: RunConfig) -> Result<Self> {
|
||||
let mut catalog = Catalog::new();
|
||||
for (domain, records) in config.zones().iter() {
|
||||
let zone = rr::Name::from_str(domain.as_str())?;
|
||||
let authroty = build_authority(domain, records)?;
|
||||
catalog.upsert(zone.clone().into(), vec![Arc::new(authroty)]);
|
||||
}
|
||||
|
||||
// use forwarder authority for the root zone
|
||||
let system_conf =
|
||||
read_system_conf().unwrap_or((get_default_resolver_config(), ResolverOpts::default()));
|
||||
let forward_config = ForwardConfig {
|
||||
name_servers: system_conf
|
||||
.0
|
||||
.name_servers()
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|x| {
|
||||
!config
|
||||
.excluded_forward_nameservers()
|
||||
.contains(&x.socket_addr.ip())
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.into(),
|
||||
options: Some(system_conf.1),
|
||||
};
|
||||
let auth = ForwardAuthority::builder_with_config(
|
||||
forward_config,
|
||||
TokioConnectionProvider::default(),
|
||||
)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
catalog.upsert(rr::Name::from_str(".")?.into(), vec![Arc::new(auth)]);
|
||||
|
||||
let catalog = Arc::new(RwLock::new(catalog));
|
||||
let handler = CatalogRequestHandler::new(catalog.clone());
|
||||
let server = ServerFuture::new(handler);
|
||||
|
||||
Ok(Self {
|
||||
server,
|
||||
catalog,
|
||||
general_config: config.general().clone(),
|
||||
udp_local_addr: None,
|
||||
tcp_local_addr: None,
|
||||
tasks: JoinSet::new(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn udp_local_addr(&self) -> Option<SocketAddr> {
|
||||
self.udp_local_addr
|
||||
}
|
||||
|
||||
pub fn tcp_local_addr(&self) -> Option<SocketAddr> {
|
||||
self.tcp_local_addr
|
||||
}
|
||||
|
||||
pub async fn register_udp_socket(&mut self, address: String) -> Result<SocketAddr> {
|
||||
let bind_addr = SocketAddr::from_str(&address)
|
||||
.with_context(|| format!("DNS Server failed to parse address {}", address))?;
|
||||
let socket = socket2::Socket::new(
|
||||
socket2::Domain::IPV4,
|
||||
socket2::Type::DGRAM,
|
||||
Some(socket2::Protocol::UDP),
|
||||
)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"DNS Server failed to create UDP socket for address {}",
|
||||
address.to_string()
|
||||
)
|
||||
})?;
|
||||
socket2::SockRef::from(&socket)
|
||||
.set_reuse_address(true)
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"DNS Server failed to set reuse address on socket {}",
|
||||
address.to_string()
|
||||
)
|
||||
})?;
|
||||
socket.bind(&bind_addr.into()).with_context(|| {
|
||||
format!("DNS Server failed to bind socket to address {}", bind_addr)
|
||||
})?;
|
||||
socket
|
||||
.set_nonblocking(true)
|
||||
.with_context(|| format!("DNS Server failed to set socket to non-blocking"))?;
|
||||
let socket = UdpSocket::from_std(socket.into()).with_context(|| {
|
||||
format!(
|
||||
"DNS Server failed to convert socket to UdpSocket for address {}",
|
||||
address.to_string()
|
||||
)
|
||||
})?;
|
||||
|
||||
let local_addr = socket
|
||||
.local_addr()
|
||||
.with_context(|| format!("DNS Server failed to get local address"))?;
|
||||
self.server.register_socket(socket);
|
||||
|
||||
Ok(local_addr)
|
||||
}
|
||||
|
||||
pub async fn run(&mut self) -> Result<()> {
|
||||
if let Some(address) = self.general_config.listen_tcp() {
|
||||
let tcp_listener = TcpListener::bind(address.clone())
|
||||
.await
|
||||
.with_context(|| format!("DNS Server failed to bind TCP address {}", address))?;
|
||||
self.tcp_local_addr = Some(tcp_listener.local_addr()?);
|
||||
self.server
|
||||
.register_listener(tcp_listener, Duration::from_secs(5));
|
||||
}
|
||||
|
||||
if let Some(address) = self.general_config.listen_udp() {
|
||||
let local_addr = self.register_udp_socket(address.clone()).await?;
|
||||
self.udp_local_addr = Some(local_addr);
|
||||
};
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn shutdown(&mut self) -> Result<()> {
|
||||
self.server.shutdown_gracefully().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn upsert(&self, name: LowerName, authority: Arc<dyn AuthorityObject>) {
|
||||
self.catalog.write().await.upsert(name, vec![authority]);
|
||||
}
|
||||
|
||||
pub async fn remove(&self, name: &LowerName) -> Option<Vec<Arc<dyn AuthorityObject>>> {
|
||||
self.catalog.write().await.remove(name)
|
||||
}
|
||||
|
||||
pub async fn update<R: ResponseHandler>(
|
||||
&self,
|
||||
update: &Request,
|
||||
response_edns: Option<Edns>,
|
||||
response_handle: R,
|
||||
) -> io::Result<ResponseInfo> {
|
||||
self.catalog
|
||||
.write()
|
||||
.await
|
||||
.update(update, response_edns, response_handle)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn contains(&self, name: &LowerName) -> bool {
|
||||
self.catalog.read().await.contains(name)
|
||||
}
|
||||
|
||||
pub async fn lookup<R: ResponseHandler>(
|
||||
&self,
|
||||
request: &Request,
|
||||
response_edns: Option<Edns>,
|
||||
response_handle: R,
|
||||
) -> ResponseInfo {
|
||||
self.catalog
|
||||
.read()
|
||||
.await
|
||||
.lookup(request, response_edns, response_handle)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn read_catalog(&self) -> RwLockReadGuard<'_, Catalog> {
|
||||
self.catalog.read().await
|
||||
}
|
||||
|
||||
pub async fn write_catalog(&self) -> RwLockWriteGuard<'_, Catalog> {
|
||||
self.catalog.write().await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::instance::dns_server::config::{
|
||||
GeneralConfigBuilder, RecordBuilder, RecordType, RunConfigBuilder,
|
||||
};
|
||||
use anyhow::Result;
|
||||
use hickory_client::client::{Client, ClientHandle};
|
||||
use hickory_proto::rr;
|
||||
use hickory_proto::runtime::TokioRuntimeProvider;
|
||||
use hickory_proto::udp::UdpClientStream;
|
||||
use maplit::hashmap;
|
||||
use std::time::Duration;
|
||||
|
||||
#[tokio::test]
|
||||
async fn it_works() -> Result<()> {
|
||||
let mut server = Server::new(
|
||||
RunConfigBuilder::default()
|
||||
.general(GeneralConfigBuilder::default().build()?)
|
||||
.build()?,
|
||||
);
|
||||
server.run().await?;
|
||||
server.shutdown().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_resolve_records() -> Result<()> {
|
||||
let configured_record = RecordBuilder::default()
|
||||
.rr_type(RecordType::A)
|
||||
.name("www.et.internal.".to_string())
|
||||
.value("123.123.123.123".to_string())
|
||||
.ttl(Duration::from_secs(60))
|
||||
.build()?;
|
||||
let configured_record2 = RecordBuilder::default()
|
||||
.rr_type(RecordType::A)
|
||||
.name("中文.et.internal.".to_string())
|
||||
.value("123.123.123.123".to_string())
|
||||
.ttl(Duration::from_secs(60))
|
||||
.build()?;
|
||||
let soa_record = RecordBuilder::default()
|
||||
.rr_type(RecordType::SOA)
|
||||
.name("et.internal.".to_string())
|
||||
.value(
|
||||
"ns.et.internal. hostmaster.et.internal. 2023101001 7200 3600 1209600 86400"
|
||||
.to_string(),
|
||||
)
|
||||
.ttl(Duration::from_secs(60))
|
||||
.build()?;
|
||||
let config = RunConfigBuilder::default()
|
||||
.general(
|
||||
GeneralConfigBuilder::default()
|
||||
.listen_udp("127.0.0.1:0")
|
||||
.build()?,
|
||||
)
|
||||
.zones(hashmap! {
|
||||
"et.internal.".to_string() => vec![configured_record.clone(), soa_record.clone(), configured_record2.clone()],
|
||||
})
|
||||
.build()?;
|
||||
|
||||
let mut server = Server::new(config);
|
||||
server.run().await?;
|
||||
|
||||
let local_addr = server.udp_local_addr().unwrap();
|
||||
let stream = UdpClientStream::builder(local_addr, TokioRuntimeProvider::default()).build();
|
||||
let (mut client, background) = Client::connect(stream).await?;
|
||||
let background_task = tokio::spawn(background);
|
||||
let response = client
|
||||
.query(
|
||||
rr::Name::from_str("www.et.internal")?,
|
||||
rr::DNSClass::IN,
|
||||
rr::RecordType::A,
|
||||
)
|
||||
.await?;
|
||||
drop(background_task);
|
||||
|
||||
println!("Response: {:?}", response);
|
||||
|
||||
assert_eq!(response.answers().len(), 1);
|
||||
let expected_record: rr::Record = configured_record.try_into()?;
|
||||
assert_eq!(response.answers().first().unwrap(), &expected_record);
|
||||
|
||||
server.shutdown().await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
446
easytier/src/instance/dns_server/server_instance.rs
Normal file
446
easytier/src/instance/dns_server/server_instance.rs
Normal file
@@ -0,0 +1,446 @@
|
||||
// single-instance server in one machine, every easytier instance that has ip address and tun device will try create a server instance.
|
||||
|
||||
// magic dns client will connect to this server to update the dns records.
|
||||
// magic dns server will add the dns server ip address to the tun device, and forward the dns request to the dns server
|
||||
|
||||
// magic dns client will establish a long live tcp connection to the magic dns server, and when the server stops or crashes,
|
||||
// all the clients will exit and let the easytier instance to launch a new server instance.
|
||||
|
||||
use std::{collections::BTreeMap, net::Ipv4Addr, str::FromStr, sync::Arc, time::Duration};
|
||||
|
||||
use anyhow::Context;
|
||||
use cidr::Ipv4Inet;
|
||||
use dashmap::DashMap;
|
||||
use hickory_proto::rr::LowerName;
|
||||
use multimap::MultiMap;
|
||||
use pnet::packet::{
|
||||
icmp::{self, IcmpTypes, MutableIcmpPacket},
|
||||
ip::IpNextHeaderProtocols,
|
||||
ipv4::{self, MutableIpv4Packet},
|
||||
tcp::{self, MutableTcpPacket},
|
||||
udp::{self, MutableUdpPacket},
|
||||
MutablePacket,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
common::{
|
||||
ifcfg::{IfConfiger, IfConfiguerTrait},
|
||||
PeerId,
|
||||
},
|
||||
instance::dns_server::{
|
||||
config::{Record, RecordBuilder, RecordType},
|
||||
server::build_authority,
|
||||
DEFAULT_ET_DNS_ZONE,
|
||||
},
|
||||
peers::{peer_manager::PeerManager, NicPacketFilter},
|
||||
proto::{
|
||||
cli::Route,
|
||||
common::{TunnelInfo, Void},
|
||||
magic_dns::{
|
||||
dns_record::{self},
|
||||
DnsRecord, DnsRecordA, DnsRecordList, GetDnsRecordResponse, HandshakeRequest,
|
||||
HandshakeResponse, MagicDnsServerRpc, MagicDnsServerRpcServer, UpdateDnsRecordRequest,
|
||||
},
|
||||
rpc_impl::standalone::{RpcServerHook, StandAloneServer},
|
||||
rpc_types::controller::{BaseController, Controller},
|
||||
},
|
||||
tunnel::{packet_def::ZCPacket, tcp::TcpTunnelListener},
|
||||
};
|
||||
|
||||
use super::{
|
||||
config::{GeneralConfigBuilder, RunConfigBuilder},
|
||||
server::Server,
|
||||
system_config::{OSConfig, SystemConfig},
|
||||
MAGIC_DNS_INSTANCE_ADDR,
|
||||
};
|
||||
|
||||
static NIC_PIPELINE_NAME: &str = "magic_dns_server";
|
||||
|
||||
/// Shared state of the single magic-DNS server instance on this machine.
///
/// Registered both as the RPC service implementation and as a NIC packet
/// filter, so it must be cheap to share behind an `Arc`.
pub(super) struct MagicDnsServerInstanceData {
    // Embedded DNS server answering queries for the managed zones.
    dns_server: Server,
    // Local tun device name, if the instance owns one.
    tun_dev: Option<String>,
    // Address assigned to the tun device; used as the rewritten destination.
    tun_ip: Ipv4Addr,
    // Virtual DNS-server address advertised to clients; traffic to it is
    // intercepted in try_process_packet_from_nic.
    fake_ip: Ipv4Addr,
    my_peer_id: PeerId,

    // zone -> (tunnel remote addr -> route)
    route_infos: DashMap<String, MultiMap<url::Url, Route>>,

    // Platform-specific OS DNS configurator; None when unsupported.
    system_config: Option<Box<dyn SystemConfig>>,
}
|
||||
|
||||
impl MagicDnsServerInstanceData {
|
||||
pub async fn update_dns_records<'a, T: Iterator<Item = &'a Route>>(
|
||||
&self,
|
||||
routes: T,
|
||||
zone: &str,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let mut records: Vec<Record> = vec![];
|
||||
for route in routes {
|
||||
if route.hostname.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let Some(ipv4_addr) = route.ipv4_addr.unwrap_or_default().address else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let record = RecordBuilder::default()
|
||||
.rr_type(RecordType::A)
|
||||
.name(format!("{}.{}", route.hostname, zone))
|
||||
.value(ipv4_addr.to_string())
|
||||
.ttl(Duration::from_secs(1))
|
||||
.build()?;
|
||||
|
||||
records.push(record);
|
||||
}
|
||||
|
||||
let soa_record = RecordBuilder::default()
|
||||
.rr_type(RecordType::SOA)
|
||||
.name(zone.to_string())
|
||||
.value(format!(
|
||||
"ns.{} hostmaster.{} 2023101001 7200 3600 1209600 86400",
|
||||
zone, zone
|
||||
))
|
||||
.ttl(Duration::from_secs(60))
|
||||
.build()?;
|
||||
records.push(soa_record);
|
||||
|
||||
let authority = build_authority(zone, &records)?;
|
||||
|
||||
self.dns_server
|
||||
.upsert(
|
||||
LowerName::from_str(zone)
|
||||
.with_context(|| "Invalid zone name, expect fomat like \"et.net.\"")?,
|
||||
Arc::new(authority),
|
||||
)
|
||||
.await;
|
||||
|
||||
tracing::debug!("Updated DNS records for zone {}: {:?}", zone, records);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn update(&self) {
|
||||
for item in self.route_infos.iter() {
|
||||
let zone = item.key();
|
||||
let route_iter = item.value().flat_iter().map(|x| x.1);
|
||||
if let Err(e) = self.update_dns_records(route_iter, zone).await {
|
||||
tracing::error!("Failed to update DNS records for zone {}: {:?}", zone, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn do_system_config(&self, zone: &str) -> Result<(), anyhow::Error> {
|
||||
if let Some(c) = &self.system_config {
|
||||
c.set_dns(&OSConfig {
|
||||
nameservers: vec![self.fake_ip.to_string()],
|
||||
search_domains: vec![zone.to_string()],
|
||||
match_domains: vec![zone.to_string()],
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
impl MagicDnsServerRpc for MagicDnsServerInstanceData {
    type Controller = BaseController;

    /// No-op handshake; establishing the connection is the whole protocol.
    async fn handshake(
        &self,
        _ctrl: Self::Controller,
        _input: HandshakeRequest,
    ) -> crate::proto::rpc_types::error::Result<HandshakeResponse> {
        Ok(Default::default())
    }

    /// Record the caller's routes under its tunnel remote address, then
    /// rebuild the DNS zones. The remote address is the key later used to
    /// drop these routes on disconnect (see on_client_disconnected).
    async fn update_dns_record(
        &self,
        ctrl: Self::Controller,
        input: UpdateDnsRecordRequest,
    ) -> crate::proto::rpc_types::error::Result<Void> {
        let Some(tunnel_info) = ctrl.get_tunnel_info() else {
            return Err(anyhow::anyhow!("No tunnel info").into());
        };
        let Some(remote_addr) = &tunnel_info.remote_addr else {
            return Err(anyhow::anyhow!("No remote addr").into());
        };
        let zone = input.zone.clone();
        self.route_infos
            .entry(zone.clone())
            .or_default()
            .insert_many(remote_addr.clone().into(), input.routes);

        self.update().await;
        Ok(Default::default())
    }

    /// Dump the current zone -> A-record view built from all clients' routes.
    async fn get_dns_record(
        &self,
        _ctrl: Self::Controller,
        _input: Void,
    ) -> crate::proto::rpc_types::error::Result<GetDnsRecordResponse> {
        let mut ret = BTreeMap::new();
        for item in self.route_infos.iter() {
            let zone = item.key();
            let routes = item.value();
            let mut dns_records = DnsRecordList::default();
            for route in routes.iter().map(|x| x.1) {
                dns_records.records.push(DnsRecord {
                    record: Some(dns_record::Record::A(DnsRecordA {
                        name: format!("{}.{}", route.hostname, zone),
                        value: route.ipv4_addr.unwrap_or_default().address,
                        // mirrors the 1s TTL used in update_dns_records
                        ttl: 1,
                    })),
                });
            }
            ret.insert(zone.clone(), dns_records);
        }
        Ok(GetDnsRecordResponse { records: ret })
    }

    /// Liveness probe from clients over their long-lived connection.
    async fn heartbeat(
        &self,
        _ctrl: Self::Controller,
        _input: Void,
    ) -> crate::proto::rpc_types::error::Result<Void> {
        Ok(Default::default())
    }
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl NicPacketFilter for MagicDnsServerInstanceData {
|
||||
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
|
||||
let data = zc_packet.mut_payload();
|
||||
let mut ip_packet = MutableIpv4Packet::new(data).unwrap();
|
||||
if ip_packet.get_version() != 4 || ip_packet.get_destination() != self.fake_ip {
|
||||
return false;
|
||||
}
|
||||
|
||||
match ip_packet.get_next_level_protocol() {
|
||||
IpNextHeaderProtocols::Udp => {
|
||||
let Some(dns_udp_addr) = self.dns_server.udp_local_addr() else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let Some(mut udp_packet) = MutableUdpPacket::new(ip_packet.payload_mut()) else {
|
||||
return false;
|
||||
};
|
||||
if udp_packet.get_destination() == 53 {
|
||||
// for dns request
|
||||
udp_packet.set_destination(dns_udp_addr.port());
|
||||
} else if udp_packet.get_source() == dns_udp_addr.port() {
|
||||
// for dns response
|
||||
udp_packet.set_source(53);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
udp_packet.set_checksum(udp::ipv4_checksum(
|
||||
&udp_packet.to_immutable(),
|
||||
&self.fake_ip,
|
||||
&self.tun_ip,
|
||||
));
|
||||
}
|
||||
|
||||
IpNextHeaderProtocols::Tcp => {
|
||||
let Some(dns_tcp_addr) = self.dns_server.tcp_local_addr() else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let Some(mut tcp_packet) = MutableTcpPacket::new(ip_packet.payload_mut()) else {
|
||||
return false;
|
||||
};
|
||||
if tcp_packet.get_destination() == 53 {
|
||||
// for dns request
|
||||
tcp_packet.set_destination(dns_tcp_addr.port());
|
||||
} else if tcp_packet.get_source() == dns_tcp_addr.port() {
|
||||
// for dns response
|
||||
tcp_packet.set_source(53);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
tcp_packet.set_checksum(tcp::ipv4_checksum(
|
||||
&tcp_packet.to_immutable(),
|
||||
&self.fake_ip,
|
||||
&self.tun_ip,
|
||||
));
|
||||
}
|
||||
|
||||
IpNextHeaderProtocols::Icmp => {
|
||||
let Some(mut icmp_packet) = MutableIcmpPacket::new(ip_packet.payload_mut()) else {
|
||||
return false;
|
||||
};
|
||||
if icmp_packet.get_icmp_type() != IcmpTypes::EchoRequest {
|
||||
return false;
|
||||
}
|
||||
icmp_packet.set_icmp_type(IcmpTypes::EchoReply);
|
||||
icmp_packet.set_checksum(icmp::checksum(&icmp_packet.to_immutable()));
|
||||
}
|
||||
|
||||
_ => {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
ip_packet.set_source(self.fake_ip);
|
||||
ip_packet.set_destination(self.tun_ip);
|
||||
|
||||
ip_packet.set_checksum(ipv4::checksum(&ip_packet.to_immutable()));
|
||||
zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.my_peer_id.into();
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
fn id(&self) -> String {
|
||||
NIC_PIPELINE_NAME.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RpcServerHook for MagicDnsServerInstanceData {
|
||||
async fn on_new_client(&self, tunnel_info: Option<TunnelInfo>) {
|
||||
println!("New client connected: {:?}", tunnel_info);
|
||||
}
|
||||
|
||||
async fn on_client_disconnected(&self, tunnel_info: Option<TunnelInfo>) {
|
||||
println!("Client disconnected: {:?}", tunnel_info);
|
||||
let Some(tunnel_info) = tunnel_info else {
|
||||
return;
|
||||
};
|
||||
let Some(remote_addr) = tunnel_info.remote_addr else {
|
||||
return;
|
||||
};
|
||||
let remote_addr = remote_addr.into();
|
||||
for mut item in self.route_infos.iter_mut() {
|
||||
item.value_mut().remove(&remote_addr);
|
||||
}
|
||||
self.route_infos.retain(|_, v| !v.is_empty());
|
||||
self.update().await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle owning the running magic-DNS server: the RPC endpoint clients
/// connect to, the shared instance data, and the routing/interface state
/// needed to undo setup in `clean_env`.
pub struct MagicDnsServerInstance {
    // Standalone TCP RPC server; kept alive for the instance's lifetime.
    rpc_server: StandAloneServer<TcpTunnelListener>,
    pub(super) data: Arc<MagicDnsServerInstanceData>,
    peer_mgr: Arc<PeerManager>,
    // Tun network; used to decide whether a /32 route to fake_ip was added.
    tun_inet: Ipv4Inet,
}
|
||||
|
||||
/// Build the platform-specific OS DNS configurator.
///
/// Windows needs the tun device name to locate the interface; macOS does
/// not. On other platforms (e.g. Linux) no configurator is available yet
/// and `Ok(None)` is returned.
fn get_system_config(
    _tun_name: Option<&str>,
) -> Result<Option<Box<dyn SystemConfig>>, anyhow::Error> {
    #[cfg(target_os = "windows")]
    {
        use super::system_config::windows::WindowsDNSManager;
        let tun_name = _tun_name.ok_or_else(|| anyhow::anyhow!("No tun name"))?;
        return Ok(Some(Box::new(WindowsDNSManager::new(tun_name)?)));
    }

    #[cfg(target_os = "macos")]
    {
        use super::system_config::darwin::DarwinConfigurator;
        return Ok(Some(Box::new(DarwinConfigurator::new())));
    }

    // Reached only on targets with no cfg block above.
    #[allow(unreachable_code)]
    Ok(None)
}
|
||||
|
||||
impl MagicDnsServerInstance {
    /// Start a magic-DNS server: RPC endpoint, embedded DNS server, an
    /// optional /32 route to the fake DNS ip, the NIC packet filter, and the
    /// OS resolver configuration.
    ///
    /// # Errors
    /// Fails if the RPC or DNS server cannot start, the route cannot be
    /// added, or the OS DNS configuration fails.
    pub async fn new(
        peer_mgr: Arc<PeerManager>,
        tun_dev: Option<String>,
        tun_inet: Ipv4Inet,
        fake_ip: Ipv4Addr,
    ) -> Result<Self, anyhow::Error> {
        // MAGIC_DNS_INSTANCE_ADDR is a compile-time constant; parse can't fail.
        let tcp_listener = TcpTunnelListener::new(MAGIC_DNS_INSTANCE_ADDR.parse().unwrap());
        let mut rpc_server = StandAloneServer::new(tcp_listener);
        rpc_server.serve().await?;

        let bind_addr = tun_inet.address();

        // Bind UDP/TCP on ephemeral ports of the tun address; the NIC filter
        // rewrites port 53 traffic to these ports.
        let dns_config = RunConfigBuilder::default()
            .general(
                GeneralConfigBuilder::default()
                    .listen_udp(format!("{}:0", bind_addr))
                    .listen_tcp(format!("{}:0", bind_addr))
                    .build()
                    .unwrap(),
            )
            // Never forward queries back to ourselves.
            .excluded_forward_nameservers(vec![fake_ip.into()])
            .build()
            .unwrap();
        let mut dns_server = Server::new(dns_config);
        dns_server.run().await?;

        // If the fake ip lies outside the tun subnet, add an explicit /32
        // route so the OS sends DNS traffic into the tun device.
        if !tun_inet.contains(&fake_ip) && tun_dev.is_some() {
            // Windows route needs a low metric to win; TODO confirm the value 4.
            let cost = if cfg!(target_os = "windows") {
                Some(4)
            } else {
                None
            };
            let ifcfg = IfConfiger {};
            ifcfg
                .add_ipv4_route(tun_dev.as_ref().unwrap(), fake_ip, 32, cost)
                .await?;
        }

        let data = Arc::new(MagicDnsServerInstanceData {
            dns_server,
            tun_dev: tun_dev.clone(),
            tun_ip: tun_inet.address(),
            fake_ip,
            my_peer_id: peer_mgr.my_peer_id(),
            route_infos: DashMap::new(),
            system_config: get_system_config(tun_dev.as_deref())?,
        });

        rpc_server
            .registry()
            .register(MagicDnsServerRpcServer::new(data.clone()), "");
        rpc_server.set_hook(data.clone());

        peer_mgr
            .add_nic_packet_process_pipeline(Box::new(data.clone()))
            .await;

        // OS DNS configuration does blocking file/registry I/O.
        let data_clone = data.clone();
        tokio::task::spawn_blocking(move || data_clone.do_system_config(DEFAULT_ET_DNS_ZONE))
            .await
            .context("Failed to configure system")??;

        Ok(Self {
            rpc_server,
            data,
            peer_mgr,
            tun_inet,
        })
    }

    /// Undo everything `new` set up: OS resolver config, the /32 route, and
    /// the NIC pipeline registration. Best-effort; failures are logged or
    /// ignored so teardown always completes.
    pub async fn clean_env(&self) {
        if let Some(configer) = &self.data.system_config {
            let ret = configer.close();
            if let Err(e) = ret {
                tracing::error!("Failed to close system config: {:?}", e);
            }
        }

        // Mirror of the route-add condition in `new`.
        if !self.tun_inet.contains(&self.data.fake_ip) && self.data.tun_dev.is_some() {
            let ifcfg = IfConfiger {};
            let _ = ifcfg
                .remove_ipv4_route(&self.data.tun_dev.as_ref().unwrap(), self.data.fake_ip, 32)
                .await;
        }

        let _ = self
            .peer_mgr
            .remove_nic_packet_process_pipeline(NIC_PIPELINE_NAME.to_string())
            .await;
    }
}
|
||||
|
||||
impl Drop for MagicDnsServerInstance {
|
||||
fn drop(&mut self) {
|
||||
println!("MagicDnsServerInstance dropped");
|
||||
}
|
||||
}
|
||||
135
easytier/src/instance/dns_server/system_config/darwin.rs
Normal file
135
easytier/src/instance/dns_server/system_config/darwin.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
fs::{self, OpenOptions},
|
||||
io::{self, Write},
|
||||
os::unix::fs::PermissionsExt,
|
||||
path::Path,
|
||||
};
|
||||
|
||||
use super::{OSConfig, SystemConfig};
|
||||
|
||||
const MAC_RESOLVER_FILE_HEADER: &str = "# Added by easytier\n";
|
||||
const ETC_RESOLVER: &str = "/etc/resolver";
|
||||
const ETC_RESOLV_CONF: &str = "/etc/resolv.conf";
|
||||
|
||||
/// macOS DNS configurator using per-domain files under /etc/resolver,
/// which macOS consults for split-DNS resolution.
pub struct DarwinConfigurator {}

impl DarwinConfigurator {
    pub fn new() -> Self {
        DarwinConfigurator {}
    }

    /// Remove every resolver file we own (identified by our header line).
    pub fn do_close(&self) -> io::Result<()> {
        self.remove_resolver_files(|_| true)
    }

    /// /etc/resolver gives per-domain resolvers, i.e. split DNS.
    pub fn supports_split_dns(&self) -> bool {
        true
    }

    /// Write resolver files for the configured search and match domains,
    /// then delete any stale files from a previous configuration.
    pub fn do_set_dns(&self, cfg: &OSConfig) -> io::Result<()> {
        fs::create_dir_all(ETC_RESOLVER)?;
        // File names that belong to the new configuration and must survive.
        let mut keep = HashSet::new();

        // Write the search.easytier file (search domain list).
        if !cfg.search_domains.is_empty() {
            let search_file = "search.easytier";
            keep.insert(search_file.to_string());
            let mut content = String::from(MAC_RESOLVER_FILE_HEADER);
            content.push_str("search");
            for domain in &cfg.search_domains {
                content.push(' ');
                // resolver files use names without the trailing root dot
                content.push_str(domain.trim_end_matches('.'));
            }
            content.push('\n');
            Self::write_resolver_file(search_file, &content)?;
        }

        // Write one nameserver file per match domain.
        let mut ns_content = String::from(MAC_RESOLVER_FILE_HEADER);
        for ns in &cfg.nameservers {
            ns_content.push_str(&format!("nameserver {}\n", ns));
        }
        for domain in &cfg.match_domains {
            let file_base = domain.trim_end_matches('.');
            keep.insert(file_base.to_string());
            Self::write_resolver_file(file_base, &ns_content)?;
        }
        // Delete resolver files we own that are no longer wanted.
        self.remove_resolver_files(|domain| !keep.contains(domain))?;

        Ok(())
    }

    /// Create/overwrite one file in /etc/resolver with mode 0644.
    fn write_resolver_file(file_name: &str, content: &str) -> io::Result<()> {
        let path = Path::new(ETC_RESOLVER).join(file_name);
        let mut file = OpenOptions::new()
            .create(true)
            .truncate(true)
            .write(true)
            .open(&path)?;
        file.set_permissions(fs::Permissions::from_mode(0o644))?;
        file.write_all(content.as_bytes())?;
        Ok(())
    }

    /// Delete files in /etc/resolver selected by `should_delete`, but only
    /// those starting with our header — never touch files from other tools.
    fn remove_resolver_files<F>(&self, should_delete: F) -> io::Result<()>
    where
        F: Fn(&str) -> bool,
    {
        let entries = match fs::read_dir(ETC_RESOLVER) {
            Ok(e) => e,
            // Directory absent means nothing to clean up.
            Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(()),
            Err(e) => return Err(e),
        };
        for entry in entries {
            let entry = entry?;
            let file_type = entry.file_type()?;
            if !file_type.is_file() {
                continue;
            }
            let name = entry.file_name();
            let name_str = name.to_string_lossy();
            if !should_delete(&name_str) {
                continue;
            }
            let full_path = entry.path();
            let content = fs::read_to_string(&full_path)?;
            // Ownership check: only remove files easytier created.
            if !content.starts_with(MAC_RESOLVER_FILE_HEADER) {
                continue;
            }
            fs::remove_file(&full_path)?;
        }
        Ok(())
    }
}
|
||||
|
||||
/// Trait adapter: delegate the cross-platform `SystemConfig` interface to
/// the macOS-specific implementations above.
impl SystemConfig for DarwinConfigurator {
    fn set_dns(&self, cfg: &OSConfig) -> io::Result<()> {
        self.do_set_dns(cfg)
    }

    fn close(&self) -> io::Result<()> {
        self.do_close()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): writes to /etc/resolver, so this needs root and a macOS
    // host; it is a smoke test, not a CI-safe unit test. The async wrapper
    // is not required (no awaits) — TODO consider a plain #[test].
    #[tokio::test]
    async fn set_dns_test() -> io::Result<()> {
        let config = OSConfig {
            nameservers: vec!["8.8.8.8".into()],
            search_domains: vec!["example.com".into()],
            match_domains: vec!["test.local".into()],
        };
        let configurator = DarwinConfigurator::new();

        configurator.set_dns(&config)?;
        configurator.close()?;

        Ok(())
    }
}
|
||||
357
easytier/src/instance/dns_server/system_config/linux.rs
Normal file
357
easytier/src/instance/dns_server/system_config/linux.rs
Normal file
@@ -0,0 +1,357 @@
|
||||
// translated from tailscale #32ce1bdb48078ec4cedaeeb5b1b2ff9c0ef61a49
|
||||
|
||||
use crate::defer;
|
||||
use anyhow::{Context, Result};
|
||||
use dbus::blocking::stdintf::org_freedesktop_dbus::Properties as _;
|
||||
use std::fs;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use std::time::Duration;
|
||||
use version_compare::Cmp;
|
||||
|
||||
// 声明依赖项(需要添加到Cargo.toml)
|
||||
// use dbus::blocking::Connection;
|
||||
// use nix::unistd::AccessFlags;
|
||||
// use resolv_conf::Resolver;
|
||||
|
||||
// 常量定义
|
||||
const RESOLV_CONF: &str = "/etc/resolv.conf";
|
||||
const PING_TIMEOUT: Duration = Duration::from_secs(1);
|
||||
|
||||
// 错误类型定义
|
||||
// Error type for DNS configuration failures.
// NOTE(review): appears unused in this file (nothing constructs it and it
// does not implement std::error::Error) — confirm before keeping.
#[derive(Debug)]
struct DNSConfigError {
    message: String,
    source: Option<anyhow::Error>,
}
|
||||
|
||||
// Detection environment: every external probe (filesystem, D-Bus, command
// output) is injected as a boxed closure so dns_mode() can be tested with
// fakes. Mirrors tailscale's osConfigEnv.
struct OSConfigEnv {
    fs: Box<dyn FileSystem>,
    dbus_ping: Box<dyn Fn(&str, &str) -> Result<()>>,
    dbus_read_string: Box<dyn Fn(&str, &str, &str, &str) -> Result<String>>,
    nm_is_using_resolved: Box<dyn Fn() -> Result<()>>,
    nm_version_between: Box<dyn Fn(&str, &str) -> Result<bool>>,
    resolvconf_style: Box<dyn Fn() -> String>,
}
|
||||
|
||||
// DNS manager trait — placeholder for the per-mode managers (direct,
// systemd-resolved, NetworkManager, resolvconf) that are not ported yet.
trait OSConfigurator: Send + Sync {
    // methods to be implemented
}
|
||||
|
||||
// Filesystem abstraction so detection logic can be tested with a fake FS.
trait FileSystem {
    fn read_file(&self, path: &str) -> Result<Vec<u8>>;
    fn exists(&self, path: &str) -> bool;
}

// Pass-through implementation backed by std::fs.
struct DirectFS;

impl FileSystem for DirectFS {
    fn read_file(&self, path: &str) -> Result<Vec<u8>> {
        fs::read(path).context("Failed to read file")
    }

    fn exists(&self, path: &str) -> bool {
        Path::new(path).exists()
    }
}
|
||||
|
||||
/// 检查 NetworkManager 是否使用 systemd-resolved 作为 DNS 管理器
|
||||
pub fn nm_is_using_resolved() -> Result<()> {
|
||||
// 连接系统 D-Bus
|
||||
let conn = dbus::blocking::Connection::new_system().context("Failed to connect to D-Bus")?;
|
||||
|
||||
// 创建 NetworkManager DnsManager 对象代理
|
||||
let proxy = conn.with_proxy(
|
||||
"org.freedesktop.NetworkManager",
|
||||
"/org/freedesktop/NetworkManager/DnsManager",
|
||||
std::time::Duration::from_secs(1),
|
||||
);
|
||||
|
||||
// 获取 Mode 属性
|
||||
let (value,): (dbus::arg::Variant<Box<dyn dbus::arg::RefArg + 'static>>,) = proxy
|
||||
.method_call(
|
||||
"org.freedesktop.DBus.Properties",
|
||||
"Get",
|
||||
("org.freedesktop.NetworkManager.DnsManager", "Mode"),
|
||||
)
|
||||
.context("Failed to get NM mode property")?;
|
||||
|
||||
// 检查 Mode 是否为 "systemd-resolved"
|
||||
if value.0.as_str() != Some("systemd-resolved") {
|
||||
return Err(anyhow::anyhow!(
|
||||
"NetworkManager is not using systemd-resolved, found: {:?}",
|
||||
value
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// 返回系统中使用的 resolvconf 实现类型("debian" 或 "openresolv")
|
||||
pub fn resolvconf_style() -> String {
|
||||
// 检查 resolvconf 命令是否存在
|
||||
if which::which("resolvconf").is_err() {
|
||||
return String::new();
|
||||
}
|
||||
|
||||
// 执行 resolvconf --version 命令
|
||||
let output = match Command::new("resolvconf").arg("--version").output() {
|
||||
Ok(output) => output,
|
||||
Err(e) => {
|
||||
// 处理命令执行错误
|
||||
if let Some(code) = e.raw_os_error() {
|
||||
// Debian 版本的 resolvconf 不支持 --version,返回特定错误码 99
|
||||
if code == 99 {
|
||||
return "debian".to_string();
|
||||
}
|
||||
}
|
||||
return String::new(); // 其他错误返回空字符串
|
||||
}
|
||||
};
|
||||
|
||||
// 检查输出是否以 "Debian resolvconf" 开头
|
||||
if output.stdout.starts_with(b"Debian resolvconf") {
|
||||
return "debian".to_string();
|
||||
}
|
||||
|
||||
// 默认视为 openresolv
|
||||
"openresolv".to_string()
|
||||
}
|
||||
|
||||
// Build the production detection environment wiring the real filesystem,
// D-Bus and command probes into OSConfigEnv.
fn new_os_config_env() -> OSConfigEnv {
    OSConfigEnv {
        fs: Box::new(DirectFS),
        dbus_ping: Box::new(dbus_ping),
        dbus_read_string: Box::new(dbus_read_string),
        nm_is_using_resolved: Box::new(nm_is_using_resolved),
        nm_version_between: Box::new(nm_version_between),
        resolvconf_style: Box::new(resolvconf_style),
    }
}
|
||||
|
||||
// Create the DNS configurator for the detected mode.
// NOTE(review): only detection is wired up; the per-mode managers are still
// commented out, so this currently just logs the mode and returns Ok(()).
fn new_os_configurator(_interface_name: String) -> Result<()> {
    let env = new_os_config_env();

    let mode = dns_mode(&env).context("Failed to detect DNS mode")?;

    tracing::info!("dns: using {} mode", mode);

    // match mode.as_str() {
    //     "direct" => Ok(Box::new(DirectManager::new(env.fs)?)),
    //     // "systemd-resolved" => Ok(Box::new(ResolvedManager::new(
    //     //     &logf,
    //     //     health,
    //     //     interface_name,
    //     // )?)),
    //     // "network-manager" => Ok(Box::new(NMManager::new(interface_name)?)),
    //     // "debian-resolvconf" => Ok(Box::new(DebianResolvconfManager::new(&logf)?)),
    //     // "openresolv" => Ok(Box::new(OpenresolvManager::new(&logf)?)),
    //     _ => {
    //         tracing::warn!("Unexpected DNS mode {}, using direct manager", mode);
    //         Ok(Box::new(DirectManager::new(env.fs)?))
    //     }
    // }
    Ok(())
}
|
||||
|
||||
use std::io::{self, BufRead, Cursor};
|
||||
|
||||
/// Guess which service owns a `resolv.conf` from its leading comment lines.
///
/// Returns "systemd-resolved", "NetworkManager", "resolvconf", or "" when
/// no known owner is mentioned before the first real config line.
pub fn resolv_owner(bs: &[u8]) -> String {
    let mut likely = String::new();

    // &[u8] implements BufRead, so we can iterate lines directly; a line
    // that fails to decode (invalid UTF-8) ends the scan with the best
    // guess collected so far.
    for read in bs.lines() {
        let Ok(raw) = read else {
            return likely;
        };
        let line = raw.trim();
        if line.is_empty() {
            continue;
        }

        // The first non-empty, non-comment line terminates the header scan.
        if !line.starts_with('#') {
            return likely;
        }

        // Later header mentions override earlier ones.
        if line.contains("systemd-resolved") {
            likely = "systemd-resolved".to_string();
        } else if line.contains("NetworkManager") {
            likely = "NetworkManager".to_string();
        } else if line.contains("resolvconf") {
            likely = "resolvconf".to_string();
        }
    }

    likely
}
|
||||
|
||||
// Detect the DNS management mode of the host ("direct", "systemd-resolved",
// or "debian-resolvconf"), following tailscale's detection heuristics.
fn dns_mode(env: &OSConfigEnv) -> Result<String> {
    // Collected key=value breadcrumbs, flushed in one log line on exit.
    let debug = std::cell::RefCell::new(Vec::new());
    let dbg = |k: &str, v: &str| debug.borrow_mut().push((k.to_string(), v.to_string()));

    // Log the breadcrumbs on every return path.
    defer! {
        if !debug.borrow().is_empty() {
            let log_entries: Vec<String> =
                debug.borrow().iter().map(|(k, v)| format!("{}={}", k, v)).collect();
            tracing::info!("dns: [{}]", log_entries.join(" "));
        }
    };

    // Is systemd-resolved reachable on the bus at all?
    let resolved_up =
        (env.dbus_ping)("org.freedesktop.resolve1", "/org/freedesktop/resolve1").is_ok();
    if resolved_up {
        dbg("resolved-ping", "yes");
    }

    // Read /etc/resolv.conf; a missing file means nothing manages DNS.
    let content = match env.fs.read_file(RESOLV_CONF) {
        Ok(content) => content,
        // NOTE(review): matching on the error's Display string is fragile —
        // prefer downcasting to io::Error and checking ErrorKind::NotFound.
        Err(e) if e.to_string().contains("NotFound") => {
            dbg("rc", "missing");
            return Ok("direct".to_string());
        }
        Err(e) => return Err(e).context("reading /etc/resolv.conf"),
    };

    // Decide based on who wrote resolv.conf.
    match resolv_owner(&content).as_str() {
        "systemd-resolved" => {
            dbg("rc", "resolved");
            // The header may claim resolved even when it is not resolving.
            if let Err(e) = resolved_is_actually_resolver(env, &dbg, &content) {
                tracing::warn!("resolvedIsActuallyResolver error: {}", e);
                dbg("resolved", "not-in-use");
                return Ok("direct".to_string());
            }

            // NetworkManager interplay checks not ported yet.

            Ok("systemd-resolved".to_string())
        }
        "resolvconf" => {
            // resolvconf flavor handling (debian vs openresolv) not ported yet.
            Ok("debian-resolvconf".to_string())
        }
        "NetworkManager" => {
            // NetworkManager handling not ported yet; assume resolved backend.
            Ok("systemd-resolved".to_string())
        }
        _ => Ok("direct".to_string()),
    }
}
|
||||
|
||||
// Probe a D-Bus service's liveness via org.freedesktop.DBus.Peer.Ping.
fn dbus_ping(name: &str, object_path: &str) -> Result<()> {
    let conn = dbus::blocking::Connection::new_system()?;
    let proxy = conn.with_proxy(name, object_path, PING_TIMEOUT);
    let _: () = proxy.method_call("org.freedesktop.DBus.Peer", "Ping", ())?;
    Ok(())
}
|
||||
|
||||
// Read a string property from a D-Bus object.
// NOTE(review): Properties.Get returns a Variant, not a bare string; the
// `(String,)` return type here likely fails to demarshal at runtime
// (compare nm_is_using_resolved, which unpacks a Variant) — confirm.
fn dbus_read_string(name: &str, object_path: &str, iface: &str, member: &str) -> Result<String> {
    let conn = dbus::blocking::Connection::new_system()?;
    let proxy = conn.with_proxy(name, object_path, PING_TIMEOUT);
    let (value,): (String,) =
        proxy.method_call("org.freedesktop.DBus.Properties", "Get", (iface, member))?;
    Ok(value)
}
|
||||
|
||||
// Check whether the installed NetworkManager version lies in the inclusive
// range [first, last]; unparseable versions fall back to "in range".
fn nm_version_between(first: &str, last: &str) -> Result<bool> {
    let conn = dbus::blocking::Connection::new_system()?;
    let proxy = conn.with_proxy(
        "org.freedesktop.NetworkManager",
        "/org/freedesktop/NetworkManager",
        PING_TIMEOUT,
    );

    let version: String = proxy.get("org.freedesktop.NetworkManager", "Version")?;
    // unwrap_or values bias toward treating an unknown version as in-range.
    let cmp_first = version_compare::compare(&version, first).unwrap_or(Cmp::Lt);
    let cmp_last = version_compare::compare(&version, last).unwrap_or(Cmp::Gt);
    Ok(cmp_first == Cmp::Ge && cmp_last == Cmp::Le)
}
|
||||
|
||||
// Verify that systemd-resolved is genuinely the active resolver, either via
// libnss_resolve in nsswitch.conf or because every nameserver in
// resolv.conf is the resolved stub (127.0.0.53).
fn resolved_is_actually_resolver(
    env: &OSConfigEnv,
    dbg: &dyn Fn(&str, &str),
    content: &[u8],
) -> Result<()> {
    // nss "resolve" module short-circuits the resolv.conf check.
    if is_libnss_resolve_used(env).is_ok() {
        dbg("resolved", "nss");
        return Ok(());
    }

    let resolver = resolv_conf::Config::parse(content)?;

    if resolver.nameservers.is_empty() {
        return Err(anyhow::anyhow!("resolv.conf has no nameservers"));
    }

    // Every nameserver must be the resolved stub listener.
    for ns in resolver.nameservers {
        if ns != Ipv4Addr::new(127, 0, 0, 53).into() {
            return Err(anyhow::anyhow!(
                "resolv.conf doesn't point to systemd-resolved"
            ));
        }
    }

    dbg("resolved", "file");
    Ok(())
}
|
||||
|
||||
// Check whether nsswitch.conf resolves hosts via libnss_resolve: Ok(()) if
// "resolve" appears on the hosts: line before "dns", Err otherwise.
fn is_libnss_resolve_used(env: &OSConfigEnv) -> Result<()> {
    let content = env.fs.read_file("/etc/nsswitch.conf")?;

    for line in String::from_utf8_lossy(&content).lines() {
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.first() == Some(&"hosts:") {
            // Order matters: the first of dns/resolve wins.
            for module in parts.iter().skip(1) {
                if *module == "dns" {
                    return Err(anyhow::anyhow!("dns module has higher priority"));
                }
                if *module == "resolve" {
                    return Ok(());
                }
            }
        }
    }

    Err(anyhow::anyhow!("libnss_resolve not used"))
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Environment-dependent smoke test: exercises real D-Bus and the real
    // /etc/resolv.conf, so the detected mode varies per host.
    #[test]
    fn dns_mode_test() {
        let env = new_os_config_env();
        let mode = dns_mode(&env).unwrap();
        println!("Detected DNS mode: {}", mode);
    }
}
|
||||
20
easytier/src/instance/dns_server/system_config/mod.rs
Normal file
20
easytier/src/instance/dns_server/system_config/mod.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
#[cfg(target_os = "linux")]
|
||||
pub mod linux;
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
pub mod windows;
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
pub mod darwin;
|
||||
|
||||
/// Desired OS resolver configuration, in platform-neutral form.
#[derive(Default, Debug)]
pub struct OSConfig {
    // DNS server addresses, as strings (parsed by each platform backend).
    pub nameservers: Vec<String>,
    // Domains appended when resolving bare names.
    pub search_domains: Vec<String>,
    // Domains routed to `nameservers` (split DNS, where supported).
    pub match_domains: Vec<String>,
}
|
||||
|
||||
/// Platform backend that applies and reverts OS DNS settings.
pub trait SystemConfig: Send + Sync {
    /// Apply the given resolver configuration to the OS.
    fn set_dns(&self, cfg: &OSConfig) -> std::io::Result<()>;
    /// Revert any changes made by `set_dns`.
    fn close(&self) -> std::io::Result<()>;
}
|
||||
252
easytier/src/instance/dns_server/system_config/windows.rs
Normal file
252
easytier/src/instance/dns_server/system_config/windows.rs
Normal file
@@ -0,0 +1,252 @@
|
||||
use std::net::IpAddr;
|
||||
use std::process::Command;
|
||||
|
||||
use std::io;
|
||||
use winreg::RegKey;
|
||||
|
||||
use crate::common::ifcfg::RegistryManager;
|
||||
|
||||
use super::{OSConfig, SystemConfig};
|
||||
|
||||
/// Detect Windows 10+ by probing a registry value introduced in Windows 10.
///
/// NOTE(review): on older Windows the missing value yields Err(NotFound)
/// rather than Ok(false) — callers must treat that Err as "older" if they
/// care; confirm intended contract.
pub fn is_windows_10_or_better() -> io::Result<bool> {
    let hklm = winreg::enums::HKEY_LOCAL_MACHINE;
    let key_path = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion";
    let key = winreg::RegKey::predef(hklm).open_subkey(key_path)?;

    // check CurrentMajorVersionNumber, which only exists on Windows 10 and later
    let value_name = "CurrentMajorVersionNumber";
    key.get_raw_value(value_name).map(|_| true)
}
|
||||
|
||||
// Controls per-interface DNS settings via the TCP/IP registry keys,
// addressed by the network interface's GUID.
pub struct InterfaceControl {
    interface_guid: String,
}
|
||||
|
||||
impl InterfaceControl {
|
||||
// 构造函数
|
||||
pub fn new(interface_guid: &str) -> Self {
|
||||
InterfaceControl {
|
||||
interface_guid: interface_guid.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
// 删除注册表值(模拟 delValue)
|
||||
fn delete_value(key: &RegKey, value_name: &str) -> io::Result<()> {
|
||||
match key.delete_value(value_name) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(e) => {
|
||||
if matches!(e.kind(), io::ErrorKind::NotFound) {
|
||||
Ok(()) // 忽略不存在的值
|
||||
} else {
|
||||
Err(e.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_primary_dns(&self, resolvers: &[IpAddr], domains: &[String]) -> io::Result<()> {
|
||||
let (ipsv4, ipsv6): (Vec<String>, Vec<String>) = resolvers
|
||||
.iter()
|
||||
.map(|ip| ip.to_string())
|
||||
.partition(|ip| ip.contains('.'));
|
||||
|
||||
let dom_strs: Vec<String> = domains
|
||||
.iter()
|
||||
.map(|d| d.trim_end_matches('.').to_string())
|
||||
.collect();
|
||||
|
||||
// IPv4 处理
|
||||
if let Ok(key4) = RegistryManager::open_interface_key(
|
||||
&self.interface_guid,
|
||||
RegistryManager::IPV4_TCPIP_INTERFACE_PREFIX,
|
||||
) {
|
||||
if ipsv4.is_empty() {
|
||||
Self::delete_value(&key4, "NameServer")?;
|
||||
} else {
|
||||
key4.set_value("NameServer", &ipsv4.join(","))?;
|
||||
}
|
||||
|
||||
if dom_strs.is_empty() {
|
||||
Self::delete_value(&key4, "SearchList")?;
|
||||
} else {
|
||||
key4.set_value("SearchList", &dom_strs.join(","))?;
|
||||
}
|
||||
|
||||
// 禁用 LLMNR(通过 DisableMulticast)
|
||||
key4.set_value("EnableMulticast", &0u32)?;
|
||||
}
|
||||
|
||||
// IPv6 处理
|
||||
if let Ok(key6) = RegistryManager::open_interface_key(
|
||||
&self.interface_guid,
|
||||
RegistryManager::IPV6_TCPIP_INTERFACE_PREFIX,
|
||||
) {
|
||||
if ipsv6.is_empty() {
|
||||
Self::delete_value(&key6, "NameServer")?;
|
||||
} else {
|
||||
key6.set_value("NameServer", &ipsv6.join(","))?;
|
||||
}
|
||||
|
||||
if dom_strs.is_empty() {
|
||||
Self::delete_value(&key6, "SearchList")?;
|
||||
} else {
|
||||
key6.set_value("SearchList", &dom_strs.join(","))?;
|
||||
}
|
||||
key6.set_value("EnableMulticast", &0u32)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn flush_dns(&self) -> io::Result<()> {
|
||||
// 刷新 DNS 缓存
|
||||
let output = Command::new("ipconfig")
|
||||
.arg("/flushdns")
|
||||
.output()
|
||||
.expect("failed to execute process");
|
||||
if !output.status.success() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Failed to flush DNS cache",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// re-register DNS
|
||||
pub fn re_register_dns(&self) -> io::Result<()> {
|
||||
// ipconfig /registerdns
|
||||
let output = Command::new("ipconfig")
|
||||
.arg("/registerdns")
|
||||
.output()
|
||||
.expect("failed to execute process");
|
||||
if !output.status.success() {
|
||||
return Err(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Failed to register DNS",
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WindowsDNSManager {
|
||||
tun_dev_name: String,
|
||||
interface_control: InterfaceControl,
|
||||
}
|
||||
|
||||
impl WindowsDNSManager {
|
||||
pub fn new(tun_dev_name: &str) -> io::Result<Self> {
|
||||
let interface_guid = RegistryManager::find_interface_guid(tun_dev_name)?;
|
||||
Ok(WindowsDNSManager {
|
||||
tun_dev_name: tun_dev_name.to_string(),
|
||||
interface_control: InterfaceControl::new(&interface_guid),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn set_primary_dns(&self, resolvers: &[IpAddr], domains: &[String]) -> io::Result<()> {
|
||||
self.interface_control.set_primary_dns(resolvers, domains)?;
|
||||
self.interface_control.flush_dns()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemConfig for WindowsDNSManager {
|
||||
fn set_dns(&self, cfg: &OSConfig) -> io::Result<()> {
|
||||
self.set_primary_dns(
|
||||
&cfg.nameservers
|
||||
.iter()
|
||||
.map(|s| s.parse::<IpAddr>().unwrap())
|
||||
.collect::<Vec<_>>(),
|
||||
&cfg.match_domains,
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn close(&self) -> io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use cidr::Ipv4Inet;
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
#[tokio::test]
|
||||
async fn test_windows_set_primary_server() {
|
||||
use std::{net::Ipv4Addr, str::FromStr as _, time::Duration};
|
||||
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::instance::dns_server::{
|
||||
runner::DnsRunner,
|
||||
tests::{check_dns_record, prepare_env},
|
||||
};
|
||||
|
||||
let tun_ip = Ipv4Inet::from_str("10.144.144.10/24").unwrap();
|
||||
let (peer_mgr, virtual_nic) = prepare_env("test1", tun_ip).await;
|
||||
let tun_name = virtual_nic.ifname().await.unwrap();
|
||||
|
||||
println!("dev_name: {}", tun_name);
|
||||
let fake_ip = Ipv4Addr::from_str("100.100.100.101").unwrap();
|
||||
let mut dns_runner = DnsRunner::new(peer_mgr, Some(tun_name.clone()), tun_ip, fake_ip);
|
||||
|
||||
let cancel_token = CancellationToken::new();
|
||||
let cancel_token_clone = cancel_token.clone();
|
||||
let t = tokio::spawn(async move {
|
||||
dns_runner.run(cancel_token_clone).await;
|
||||
});
|
||||
|
||||
// windows is slow to add a ip address, wait for a longer time for dns server ready ,with ping
|
||||
let now = std::time::Instant::now();
|
||||
while now.elapsed() < Duration::from_secs(15) {
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
if let Ok(o) = tokio::process::Command::new("ping")
|
||||
.arg("-n")
|
||||
.arg("1")
|
||||
.arg("-w")
|
||||
.arg("100")
|
||||
.arg(&fake_ip.to_string())
|
||||
.output()
|
||||
.await
|
||||
{
|
||||
if o.status.success() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
check_dns_record(&fake_ip, "test1.et.net", "10.144.144.10").await;
|
||||
|
||||
let dns_mgr = super::WindowsDNSManager::new(&tun_name).unwrap();
|
||||
println!("dev_name: {}", tun_name);
|
||||
println!("guid: {}", dns_mgr.interface_control.interface_guid);
|
||||
|
||||
dns_mgr
|
||||
.interface_control
|
||||
.set_primary_dns(
|
||||
&["100.100.100.101".parse().unwrap()],
|
||||
&[".et.net.".to_string()],
|
||||
)
|
||||
.unwrap();
|
||||
dns_mgr.interface_control.flush_dns().unwrap();
|
||||
|
||||
tracing::info!("check dns record with nslookup");
|
||||
|
||||
// nslookup should return 10.144.144.10
|
||||
let ret = tokio::process::Command::new("nslookup")
|
||||
.arg("test1.et.net")
|
||||
.output()
|
||||
.await
|
||||
.expect("failed to execute process");
|
||||
assert!(ret.status.success());
|
||||
let output = String::from_utf8_lossy(&ret.stdout);
|
||||
println!("nslookup output: {}", output);
|
||||
assert!(output.contains("10.144.144.10"));
|
||||
|
||||
cancel_token.cancel();
|
||||
let _ = t.await;
|
||||
}
|
||||
}
|
||||
134
easytier/src/instance/dns_server/tests.rs
Normal file
134
easytier/src/instance/dns_server/tests.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::str::FromStr as _;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use cidr::Ipv4Inet;
|
||||
use hickory_client::client::{Client, ClientHandle as _};
|
||||
use hickory_proto::rr;
|
||||
use hickory_proto::runtime::TokioRuntimeProvider;
|
||||
use hickory_proto::udp::UdpClientStream;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::common::global_ctx::tests::get_mock_global_ctx;
|
||||
use crate::connector::udp_hole_punch::tests::replace_stun_info_collector;
|
||||
|
||||
use crate::instance::dns_server::runner::DnsRunner;
|
||||
use crate::instance::dns_server::server_instance::MagicDnsServerInstance;
|
||||
use crate::instance::dns_server::DEFAULT_ET_DNS_ZONE;
|
||||
use crate::instance::virtual_nic::NicCtx;
|
||||
use crate::peers::peer_manager::{PeerManager, RouteAlgoType};
|
||||
|
||||
use crate::peers::create_packet_recv_chan;
|
||||
use crate::proto::cli::Route;
|
||||
use crate::proto::common::NatType;
|
||||
|
||||
pub async fn prepare_env(dns_name: &str, tun_ip: Ipv4Inet) -> (Arc<PeerManager>, NicCtx) {
|
||||
let ctx = get_mock_global_ctx();
|
||||
ctx.set_hostname(dns_name.to_owned());
|
||||
ctx.set_ipv4(Some(tun_ip));
|
||||
let (s, r) = create_packet_recv_chan();
|
||||
let peer_mgr = Arc::new(PeerManager::new(RouteAlgoType::Ospf, ctx, s));
|
||||
peer_mgr.run().await.unwrap();
|
||||
replace_stun_info_collector(peer_mgr.clone(), NatType::PortRestricted);
|
||||
|
||||
let r = Arc::new(tokio::sync::Mutex::new(r));
|
||||
let mut virtual_nic = NicCtx::new(peer_mgr.get_global_ctx(), &peer_mgr, r);
|
||||
virtual_nic.run(tun_ip).await.unwrap();
|
||||
|
||||
(peer_mgr, virtual_nic)
|
||||
}
|
||||
|
||||
pub async fn check_dns_record(fake_ip: &Ipv4Addr, domain: &str, expected_ip: &str) {
|
||||
let stream = UdpClientStream::builder(
|
||||
SocketAddr::new(fake_ip.clone().into(), 53),
|
||||
TokioRuntimeProvider::default(),
|
||||
)
|
||||
.build();
|
||||
let (mut client, background) = Client::connect(stream).await.unwrap();
|
||||
let background_task = tokio::spawn(background);
|
||||
let response = client
|
||||
.query(
|
||||
rr::Name::from_str(domain).unwrap(),
|
||||
rr::DNSClass::IN,
|
||||
rr::RecordType::A,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
drop(background_task);
|
||||
|
||||
println!("Response: {:?}", response);
|
||||
|
||||
assert_eq!(response.answers().len(), 1, "{:?}", response.answers());
|
||||
let resp = response.answers().first().unwrap();
|
||||
assert_eq!(
|
||||
resp.clone().into_parts().rdata.into_a().unwrap().0,
|
||||
expected_ip.parse::<Ipv4Addr>().unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_magic_dns_server_instance() {
|
||||
let tun_ip = Ipv4Inet::from_str("10.144.144.10/24").unwrap();
|
||||
let (peer_mgr, virtual_nic) = prepare_env("test1", tun_ip).await;
|
||||
let tun_name = virtual_nic.ifname().await.unwrap();
|
||||
let fake_ip = Ipv4Addr::from_str("100.100.100.101").unwrap();
|
||||
let dns_server_inst =
|
||||
MagicDnsServerInstance::new(peer_mgr.clone(), Some(tun_name), tun_ip, fake_ip)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let routes = vec![Route {
|
||||
hostname: "test1".to_string(),
|
||||
ipv4_addr: Some(Ipv4Inet::from_str("8.8.8.8/24").unwrap().into()),
|
||||
..Default::default()
|
||||
}];
|
||||
dns_server_inst
|
||||
.data
|
||||
.update_dns_records(routes.iter(), DEFAULT_ET_DNS_ZONE)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
check_dns_record(&fake_ip, "test1.et.net", "8.8.8.8").await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_magic_dns_runner() {
|
||||
let tun_ip = Ipv4Inet::from_str("10.144.144.10/24").unwrap();
|
||||
let (peer_mgr, virtual_nic) = prepare_env("test1", tun_ip).await;
|
||||
let tun_name = virtual_nic.ifname().await.unwrap();
|
||||
let fake_ip = Ipv4Addr::from_str("100.100.100.101").unwrap();
|
||||
let mut dns_runner = DnsRunner::new(peer_mgr, Some(tun_name), tun_ip, fake_ip);
|
||||
|
||||
let cancel_token = CancellationToken::new();
|
||||
let cancel_token_clone = cancel_token.clone();
|
||||
let t = tokio::spawn(async move {
|
||||
dns_runner.run(cancel_token_clone).await;
|
||||
});
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
check_dns_record(&fake_ip, "test1.et.net", "10.144.144.10").await;
|
||||
|
||||
// add a new dns runner
|
||||
let tun_ip2 = Ipv4Inet::from_str("10.144.144.20/24").unwrap();
|
||||
let (peer_mgr, virtual_nic) = prepare_env("test2", tun_ip2).await;
|
||||
let tun_name2 = virtual_nic.ifname().await.unwrap();
|
||||
let mut dns_runner2 = DnsRunner::new(peer_mgr, Some(tun_name2), tun_ip2, fake_ip);
|
||||
let cancel_token2 = CancellationToken::new();
|
||||
let cancel_token2_clone = cancel_token2.clone();
|
||||
let t2 = tokio::spawn(async move {
|
||||
dns_runner2.run(cancel_token2_clone).await;
|
||||
});
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
check_dns_record(&fake_ip, "test1.et.net", "10.144.144.10").await;
|
||||
check_dns_record(&fake_ip, "test2.et.net", "10.144.144.20").await;
|
||||
|
||||
// stop runner 1, runner 2 will take over the dns server
|
||||
cancel_token.cancel();
|
||||
t.await.unwrap();
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
check_dns_record(&fake_ip, "test2.et.net", "10.144.144.20").await;
|
||||
|
||||
cancel_token2.cancel();
|
||||
t2.await.unwrap();
|
||||
}
|
||||
@@ -7,7 +7,9 @@ use std::sync::{Arc, Weak};
|
||||
use anyhow::Context;
|
||||
use cidr::Ipv4Inet;
|
||||
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::{sync::Mutex, task::JoinSet};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::common::config::ConfigLoader;
|
||||
use crate::common::error::Error;
|
||||
@@ -34,6 +36,8 @@ use crate::proto::rpc_types::controller::BaseController;
|
||||
use crate::tunnel::tcp::TcpTunnelListener;
|
||||
use crate::vpn_portal::{self, VpnPortal};
|
||||
|
||||
use super::dns_server::runner::DnsRunner;
|
||||
use super::dns_server::MAGIC_DNS_FAKE_IP;
|
||||
use super::listeners::ListenerManager;
|
||||
|
||||
#[cfg(feature = "socks5")]
|
||||
@@ -76,7 +80,13 @@ impl IpProxy {
|
||||
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
self.tcp_proxy.start(true).await?;
|
||||
self.icmp_proxy.start().await?;
|
||||
if let Err(e) = self.icmp_proxy.start().await {
|
||||
tracing::error!("start icmp proxy failed: {:?}", e);
|
||||
if cfg!(not(target_os = "android")) {
|
||||
// android may not support icmp proxy
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
self.udp_proxy.start().await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -101,7 +111,49 @@ impl NicCtx {
|
||||
}
|
||||
}
|
||||
|
||||
type ArcNicCtx = Arc<Mutex<Option<Box<dyn Any + 'static + Send>>>>;
|
||||
struct MagicDnsContainer {
|
||||
dns_runner_task: JoinHandle<()>,
|
||||
dns_runner_cancel_token: CancellationToken,
|
||||
}
|
||||
|
||||
// nic container will be cleared when dhcp ip changed
|
||||
pub(crate) struct NicCtxContainer {
|
||||
nic_ctx: Option<Box<dyn Any + 'static + Send>>,
|
||||
magic_dns: Option<MagicDnsContainer>,
|
||||
}
|
||||
|
||||
impl NicCtxContainer {
|
||||
fn new(nic_ctx: NicCtx, dns_runner: Option<DnsRunner>) -> Self {
|
||||
if let Some(mut dns_runner) = dns_runner {
|
||||
let token = CancellationToken::new();
|
||||
let token_clone = token.clone();
|
||||
let task = tokio::spawn(async move {
|
||||
let _ = dns_runner.run(token_clone).await;
|
||||
});
|
||||
Self {
|
||||
nic_ctx: Some(Box::new(nic_ctx)),
|
||||
magic_dns: Some(MagicDnsContainer {
|
||||
dns_runner_task: task,
|
||||
dns_runner_cancel_token: token,
|
||||
}),
|
||||
}
|
||||
} else {
|
||||
Self {
|
||||
nic_ctx: Some(Box::new(nic_ctx)),
|
||||
magic_dns: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn new_with_any<T: 'static + Send>(ctx: T) -> Self {
|
||||
Self {
|
||||
nic_ctx: Some(Box::new(ctx)),
|
||||
magic_dns: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ArcNicCtx = Arc<Mutex<Option<NicCtxContainer>>>;
|
||||
|
||||
pub struct Instance {
|
||||
inst_name: String,
|
||||
@@ -233,7 +285,14 @@ impl Instance {
|
||||
arc_nic_ctx: ArcNicCtx,
|
||||
packet_recv: Arc<Mutex<PacketRecvChanReceiver>>,
|
||||
) {
|
||||
let _ = arc_nic_ctx.lock().await.take();
|
||||
if let Some(old_ctx) = arc_nic_ctx.lock().await.take() {
|
||||
if let Some(dns_runner) = old_ctx.magic_dns {
|
||||
dns_runner.dns_runner_cancel_token.cancel();
|
||||
tracing::debug!("cancelling dns runner task");
|
||||
let ret = dns_runner.dns_runner_task.await;
|
||||
tracing::debug!("dns runner task cancelled, ret: {:?}", ret);
|
||||
}
|
||||
};
|
||||
|
||||
let mut tasks = JoinSet::new();
|
||||
tasks.spawn(async move {
|
||||
@@ -242,14 +301,40 @@ impl Instance {
|
||||
tracing::trace!("packet consumed by mock nic ctx: {:?}", packet);
|
||||
}
|
||||
});
|
||||
arc_nic_ctx.lock().await.replace(Box::new(tasks));
|
||||
arc_nic_ctx
|
||||
.lock()
|
||||
.await
|
||||
.replace(NicCtxContainer::new_with_any(tasks));
|
||||
|
||||
tracing::debug!("nic ctx cleared.");
|
||||
}
|
||||
|
||||
async fn use_new_nic_ctx(arc_nic_ctx: ArcNicCtx, nic_ctx: NicCtx) {
|
||||
fn create_magic_dns_runner(
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
tun_dev: Option<String>,
|
||||
tun_ip: Ipv4Inet,
|
||||
) -> Option<DnsRunner> {
|
||||
let ctx = peer_mgr.get_global_ctx();
|
||||
if !ctx.config.get_flags().accept_dns {
|
||||
return None;
|
||||
}
|
||||
|
||||
let runner = DnsRunner::new(
|
||||
peer_mgr,
|
||||
tun_dev,
|
||||
tun_ip,
|
||||
MAGIC_DNS_FAKE_IP.parse().unwrap(),
|
||||
);
|
||||
Some(runner)
|
||||
}
|
||||
|
||||
async fn use_new_nic_ctx(
|
||||
arc_nic_ctx: ArcNicCtx,
|
||||
nic_ctx: NicCtx,
|
||||
magic_dns: Option<DnsRunner>,
|
||||
) {
|
||||
let mut g = arc_nic_ctx.lock().await;
|
||||
*g = Some(Box::new(nic_ctx));
|
||||
*g = Some(NicCtxContainer::new(nic_ctx, magic_dns));
|
||||
tracing::debug!("nic ctx updated.");
|
||||
}
|
||||
|
||||
@@ -339,7 +424,17 @@ impl Instance {
|
||||
global_ctx_c.set_ipv4(None);
|
||||
continue;
|
||||
}
|
||||
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx).await;
|
||||
let ifname = new_nic_ctx.ifname().await;
|
||||
Self::use_new_nic_ctx(
|
||||
nic_ctx.clone(),
|
||||
new_nic_ctx,
|
||||
Self::create_magic_dns_runner(
|
||||
peer_manager_c.clone(),
|
||||
ifname,
|
||||
ip.clone(),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
current_dhcp_ip = Some(ip);
|
||||
@@ -374,7 +469,17 @@ impl Instance {
|
||||
self.peer_packet_receiver.clone(),
|
||||
);
|
||||
new_nic_ctx.run(ipv4_addr).await?;
|
||||
Self::use_new_nic_ctx(self.nic_ctx.clone(), new_nic_ctx).await;
|
||||
let ifname = new_nic_ctx.ifname().await;
|
||||
Self::use_new_nic_ctx(
|
||||
self.nic_ctx.clone(),
|
||||
new_nic_ctx,
|
||||
Self::create_magic_dns_runner(
|
||||
self.peer_manager.clone(),
|
||||
ifname,
|
||||
ipv4_addr.clone(),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -417,7 +522,13 @@ impl Instance {
|
||||
}
|
||||
|
||||
#[cfg(feature = "socks5")]
|
||||
self.socks5_server.run().await?;
|
||||
self.socks5_server
|
||||
.run(
|
||||
self.kcp_proxy_src
|
||||
.as_ref()
|
||||
.map(|x| Arc::downgrade(&x.get_kcp_endpoint())),
|
||||
)
|
||||
.await?;
|
||||
|
||||
self.run_rpc_server().await?;
|
||||
|
||||
@@ -605,7 +716,13 @@ impl Instance {
|
||||
.run_for_android(fd)
|
||||
.await
|
||||
.with_context(|| "add ip failed")?;
|
||||
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx).await;
|
||||
|
||||
let magic_dns_runner = if let Some(ipv4) = global_ctx.get_ipv4() {
|
||||
Self::create_magic_dns_runner(peer_manager.clone(), None, ipv4)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx, magic_dns_runner).await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{fmt::Debug, sync::Arc};
|
||||
use std::{fmt::Debug, net::IpAddr, str::FromStr, sync::Arc};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
@@ -54,6 +54,14 @@ pub fn is_url_host_ipv6(l: &url::Url) -> bool {
|
||||
l.host_str().map_or(false, |h| h.contains(':'))
|
||||
}
|
||||
|
||||
pub fn is_url_host_unspecified(l: &url::Url) -> bool {
|
||||
if let Ok(ip) = IpAddr::from_str(l.host_str().unwrap_or_default()) {
|
||||
ip.is_unspecified()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait TunnelHandlerForListener {
|
||||
async fn handle_tunnel(&self, tunnel: Box<dyn Tunnel>) -> Result<(), Error>;
|
||||
@@ -126,7 +134,10 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
|
||||
)
|
||||
.await?;
|
||||
|
||||
if self.global_ctx.config.get_flags().enable_ipv6 && !is_url_host_ipv6(&l) {
|
||||
if self.global_ctx.config.get_flags().enable_ipv6
|
||||
&& !is_url_host_ipv6(&l)
|
||||
&& is_url_host_unspecified(&l)
|
||||
{
|
||||
let mut ipv6_listener = l.clone();
|
||||
ipv6_listener
|
||||
.set_host(Some("[::]".to_string().as_str()))
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
pub mod dns_server;
|
||||
pub mod instance;
|
||||
pub mod listeners;
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user