Compare commits
40 Commits
v2.3.0
...
releases/v
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dde7a4dff1 | ||
|
|
40601bd05b | ||
|
|
72d5ed908e | ||
|
|
72673a9d52 | ||
|
|
327ccdcf38 | ||
|
|
8c2f96d1aa | ||
|
|
34ba0bc95b | ||
|
|
ed162c2e66 | ||
|
|
40b5fe9a54 | ||
|
|
5a98fac395 | ||
|
|
0bab14cd72 | ||
|
|
b407cfd9d4 | ||
|
|
25dcdc652a | ||
|
|
950cb04534 | ||
|
|
c07d1286ef | ||
|
|
8ddd153022 | ||
|
|
870353c499 | ||
|
|
ecebbecd3b | ||
|
|
f39fbb2ce2 | ||
|
|
ec56c0bc45 | ||
|
|
20a6025075 | ||
|
|
707963c0d9 | ||
|
|
3c7837692e | ||
|
|
f890812577 | ||
|
|
47f3efe71b | ||
|
|
6d88b10b14 | ||
|
|
d34a51739f | ||
|
|
a6773aa549 | ||
|
|
0314c66635 | ||
|
|
3fb172b4d2 | ||
|
|
96fc19b803 | ||
|
|
9f7ba8ab8f | ||
|
|
e592e9f29a | ||
|
|
4608bca998 | ||
|
|
b5dfc7374c | ||
|
|
b469f8197a | ||
|
|
0a38a8ef4a | ||
|
|
e75be7801f | ||
|
|
6c49bb1865 | ||
|
|
f9c24bc205 |
@@ -6,72 +6,84 @@ rustflags = ["-C", "linker-flavor=ld.lld"]
|
||||
linker = "aarch64-linux-gnu-gcc"
|
||||
|
||||
[target.aarch64-unknown-linux-musl]
|
||||
linker = "aarch64-linux-musl-gcc"
|
||||
linker = "aarch64-unknown-linux-musl-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.'cfg(all(windows, target_env = "msvc"))']
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.mipsel-unknown-linux-musl]
|
||||
linker = "mipsel-linux-muslsf-gcc"
|
||||
linker = "mipsel-unknown-linux-muslsf-gcc"
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+crt-static",
|
||||
"-L",
|
||||
"./musl_gcc/mipsel-linux-muslsf-cross/mipsel-linux-muslsf/lib",
|
||||
"./musl_gcc/mipsel-unknown-linux-muslsf/mipsel-unknown-linux-muslsf/lib",
|
||||
"-L",
|
||||
"./musl_gcc/mipsel-linux-muslsf-cross/lib/gcc/mipsel-linux-muslsf/11.2.1",
|
||||
"./musl_gcc/mipsel-unknown-linux-muslsf/mipsel-unknown-linux-muslsf/sysroot/usr/lib",
|
||||
"-L",
|
||||
"./musl_gcc/mipsel-unknown-linux-muslsf/lib/gcc/mipsel-unknown-linux-muslsf/15.1.0",
|
||||
"-l",
|
||||
"atomic",
|
||||
"-l",
|
||||
"ctz",
|
||||
"-l",
|
||||
"gcc",
|
||||
]
|
||||
|
||||
[target.mips-unknown-linux-musl]
|
||||
linker = "mips-linux-muslsf-gcc"
|
||||
linker = "mips-unknown-linux-muslsf-gcc"
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+crt-static",
|
||||
"-L",
|
||||
"./musl_gcc/mips-linux-muslsf-cross/mips-linux-muslsf/lib",
|
||||
"./musl_gcc/mips-unknown-linux-muslsf/mips-unknown-linux-muslsf/lib",
|
||||
"-L",
|
||||
"./musl_gcc/mips-linux-muslsf-cross/lib/gcc/mips-linux-muslsf/11.2.1",
|
||||
"./musl_gcc/mips-unknown-linux-muslsf/mips-unknown-linux-muslsf/sysroot/usr/lib",
|
||||
"-L",
|
||||
"./musl_gcc/mips-unknown-linux-muslsf/lib/gcc/mips-unknown-linux-muslsf/15.1.0",
|
||||
"-l",
|
||||
"atomic",
|
||||
"-l",
|
||||
"ctz",
|
||||
"-l",
|
||||
"gcc",
|
||||
]
|
||||
|
||||
[target.armv7-unknown-linux-musleabihf]
|
||||
linker = "armv7l-linux-musleabihf-gcc"
|
||||
linker = "armv7-unknown-linux-musleabihf-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.armv7-unknown-linux-musleabi]
|
||||
linker = "armv7m-linux-musleabi-gcc"
|
||||
linker = "armv7-unknown-linux-musleabi-gcc"
|
||||
rustflags = ["-C", "target-feature=+crt-static"]
|
||||
|
||||
[target.arm-unknown-linux-musleabihf]
|
||||
linker = "arm-linux-musleabihf-gcc"
|
||||
linker = "arm-unknown-linux-musleabihf-gcc"
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+crt-static",
|
||||
"-L",
|
||||
"./musl_gcc/arm-linux-musleabihf-cross/arm-linux-musleabihf/lib",
|
||||
"./musl_gcc/arm-unknown-linux-musleabihf/arm-unknown-linux-musleabihf/lib",
|
||||
"-L",
|
||||
"./musl_gcc/arm-linux-musleabihf-cross/lib/gcc/arm-linux-musleabihf/11.2.1",
|
||||
"./musl_gcc/arm-unknown-linux-musleabihf/lib/gcc/arm-unknown-linux-musleabihf/15.1.0",
|
||||
"-l",
|
||||
"atomic",
|
||||
"-l",
|
||||
"gcc",
|
||||
]
|
||||
|
||||
[target.arm-unknown-linux-musleabi]
|
||||
linker = "arm-linux-musleabi-gcc"
|
||||
linker = "arm-unknown-linux-musleabi-gcc"
|
||||
rustflags = [
|
||||
"-C",
|
||||
"target-feature=+crt-static",
|
||||
"-L",
|
||||
"./musl_gcc/arm-linux-musleabi-cross/arm-linux-musleabi/lib",
|
||||
"./musl_gcc/arm-unknown-linux-musleabi/arm-unknown-linux-musleabi/lib",
|
||||
"-L",
|
||||
"./musl_gcc/arm-linux-musleabi-cross/lib/gcc/arm-linux-musleabi/11.2.1",
|
||||
"./musl_gcc/arm-unknown-linux-musleabi/lib/gcc/arm-unknown-linux-musleabi/15.1.0",
|
||||
"-l",
|
||||
"atomic",
|
||||
"-l",
|
||||
"gcc",
|
||||
]
|
||||
|
||||
29
.github/workflows/core.yml
vendored
29
.github/workflows/core.yml
vendored
@@ -166,18 +166,23 @@ jobs:
|
||||
if: ${{ ! endsWith(matrix.TARGET, 'freebsd') }}
|
||||
run: |
|
||||
bash ./.github/workflows/install_rust.sh
|
||||
|
||||
# we set the sysroot when sysroot is a dir
|
||||
# this dir is a soft link generated by install_rust.sh
|
||||
# kcp-sys need this to gen ffi bindings. without this clang may fail to find some libc headers such as bits/libc-header-start.h
|
||||
export KCP_SYS_EXTRA_HEADER_PATH=/usr/include/musl-cross
|
||||
if [[ -d "./musl_gcc/sysroot" ]]; then
|
||||
export BINDGEN_EXTRA_CLANG_ARGS=--sysroot=$(readlink -f ./musl_gcc/sysroot)
|
||||
fi
|
||||
|
||||
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
|
||||
cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips --package=easytier
|
||||
cargo +nightly build -r --target $TARGET -Z build-std=std,panic_abort --package=easytier
|
||||
else
|
||||
if [[ $OS =~ ^windows.*$ ]]; then
|
||||
SUFFIX=.exe
|
||||
fi
|
||||
cargo build --release --verbose --target $TARGET --package=easytier-web --features=embed
|
||||
cargo build --release --target $TARGET --package=easytier-web --features=embed
|
||||
mv ./target/$TARGET/release/easytier-web"$SUFFIX" ./target/$TARGET/release/easytier-web-embed"$SUFFIX"
|
||||
cargo build --release --verbose --target $TARGET
|
||||
cargo build --release --target $TARGET
|
||||
fi
|
||||
|
||||
# Copied and slightly modified from @lmq8267 (https://github.com/lmq8267)
|
||||
@@ -218,13 +223,6 @@ jobs:
|
||||
mv ./target/$TARGET/release/easytier-web ./target/$TARGET/release/easytier-web-embed
|
||||
cargo build --release --verbose --target $TARGET
|
||||
|
||||
- name: Install UPX
|
||||
if: ${{ matrix.OS != 'macos-latest' }}
|
||||
uses: crazy-max/ghaction-upx@v3
|
||||
with:
|
||||
version: latest
|
||||
install-only: true
|
||||
|
||||
- name: Compress
|
||||
run: |
|
||||
mkdir -p ./artifacts/objects/
|
||||
@@ -246,8 +244,11 @@ jobs:
|
||||
fi
|
||||
|
||||
if [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^.*freebsd$ ]]; then
|
||||
upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
|
||||
upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX"
|
||||
UPX_VERSION=4.2.4
|
||||
curl -L https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz -s | tar xJvf -
|
||||
cp upx-${UPX_VERSION}-amd64_linux/upx .
|
||||
./upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
|
||||
./upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX"
|
||||
fi
|
||||
|
||||
mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/
|
||||
@@ -315,4 +316,4 @@ jobs:
|
||||
./easytier-contrib/easytier-magisk
|
||||
!./easytier-contrib/easytier-magisk/build.sh
|
||||
!./easytier-contrib/easytier-magisk/magisk_update.json
|
||||
if-no-files-found: error
|
||||
if-no-files-found: error
|
||||
|
||||
2
.github/workflows/docker.yml
vendored
2
.github/workflows/docker.yml
vendored
@@ -11,7 +11,7 @@ on:
|
||||
image_tag:
|
||||
description: 'Tag for this image build'
|
||||
type: string
|
||||
default: 'v2.3.0'
|
||||
default: 'v2.3.2'
|
||||
required: true
|
||||
mark_latest:
|
||||
description: 'Mark this image as latest'
|
||||
|
||||
48
.github/workflows/install_rust.sh
vendored
48
.github/workflows/install_rust.sh
vendored
@@ -8,38 +8,22 @@
|
||||
# dependencies are only needed on ubuntu as that's the only place where
|
||||
# we make cross-compilation
|
||||
if [[ $OS =~ ^ubuntu.*$ ]]; then
|
||||
sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools libappindicator3-dev llvm clang
|
||||
# curl -s musl.cc | grep mipsel
|
||||
case $TARGET in
|
||||
mipsel-unknown-linux-musl)
|
||||
MUSL_URI=mipsel-linux-muslsf
|
||||
;;
|
||||
mips-unknown-linux-musl)
|
||||
MUSL_URI=mips-linux-muslsf
|
||||
;;
|
||||
aarch64-unknown-linux-musl)
|
||||
MUSL_URI=aarch64-linux-musl
|
||||
;;
|
||||
armv7-unknown-linux-musleabihf)
|
||||
MUSL_URI=armv7l-linux-musleabihf
|
||||
;;
|
||||
armv7-unknown-linux-musleabi)
|
||||
MUSL_URI=armv7m-linux-musleabi
|
||||
;;
|
||||
arm-unknown-linux-musleabihf)
|
||||
MUSL_URI=arm-linux-musleabihf
|
||||
;;
|
||||
arm-unknown-linux-musleabi)
|
||||
MUSL_URI=arm-linux-musleabi
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -n "$MUSL_URI" ]; then
|
||||
sudo apt-get update && sudo apt-get install -qq musl-tools libappindicator3-dev llvm clang
|
||||
# https://github.com/cross-tools/musl-cross/releases
|
||||
# if "musl" is a substring of TARGET, we assume that we are using musl
|
||||
MUSL_TARGET=$TARGET
|
||||
# if target is mips or mipsel, we should use soft-float version of musl
|
||||
if [[ $TARGET =~ ^mips.*$ || $TARGET =~ ^mipsel.*$ ]]; then
|
||||
MUSL_TARGET=${TARGET}sf
|
||||
fi
|
||||
if [[ $MUSL_TARGET =~ musl ]]; then
|
||||
mkdir -p ./musl_gcc
|
||||
wget --inet4-only -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/
|
||||
tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/
|
||||
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/
|
||||
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/${MUSL_URI}/include/ /usr/include/musl-cross
|
||||
wget --inet4-only -c https://github.com/cross-tools/musl-cross/releases/download/20250520/${MUSL_TARGET}.tar.xz -P ./musl_gcc/
|
||||
tar xf ./musl_gcc/${MUSL_TARGET}.tar.xz -C ./musl_gcc/
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/bin/*gcc /usr/bin/
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/include/ /usr/include/musl-cross
|
||||
sudo ln -sf $(pwd)/musl_gcc/${MUSL_TARGET}/${MUSL_TARGET}/sysroot/ ./musl_gcc/sysroot
|
||||
sudo chmod -R a+rwx ./musl_gcc
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -50,7 +34,7 @@ rustup default 1.86
|
||||
|
||||
# mips/mipsel cannot add target from rustup, need compile by ourselves
|
||||
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
|
||||
cd "$PWD/musl_gcc/${MUSL_URI}-cross/lib/gcc/${MUSL_URI}/11.2.1" || exit 255
|
||||
cd "$PWD/musl_gcc/${MUSL_TARGET}/lib/gcc/${MUSL_TARGET}/15.1.0" || exit 255
|
||||
# for panic-abort
|
||||
cp libgcc_eh.a libunwind.a
|
||||
|
||||
|
||||
13
.github/workflows/release.yml
vendored
13
.github/workflows/release.yml
vendored
@@ -21,7 +21,7 @@ on:
|
||||
version:
|
||||
description: 'Version for this release'
|
||||
type: string
|
||||
default: 'v2.3.0'
|
||||
default: 'v2.3.2'
|
||||
required: true
|
||||
make_latest:
|
||||
description: 'Mark this release as latest'
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
repo: EasyTier/EasyTier
|
||||
path: release_assets_nozip
|
||||
|
||||
- name: Download GUI Artifact
|
||||
- name: Download Mobile Artifact
|
||||
uses: dawidd6/action-download-artifact@v6
|
||||
with:
|
||||
github_token: ${{secrets.GITHUB_TOKEN}}
|
||||
@@ -78,7 +78,14 @@ jobs:
|
||||
ls -l -R ./
|
||||
chmod -R 755 .
|
||||
for x in `ls`; do
|
||||
zip ../zipped_assets/$x-${VERSION}.zip $x/*;
|
||||
if [ "$x" = "Easytier-Magisk" ]; then
|
||||
# for Easytier-Magisk, make sure files are in the root of the zip
|
||||
cd $x;
|
||||
zip -r ../../zipped_assets/$x-${VERSION}.zip .;
|
||||
cd ..;
|
||||
else
|
||||
zip -r ../zipped_assets/$x-${VERSION}.zip $x;
|
||||
fi
|
||||
done
|
||||
|
||||
- name: Release
|
||||
|
||||
3
.github/workflows/test.yml
vendored
3
.github/workflows/test.yml
vendored
@@ -91,6 +91,7 @@ jobs:
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1 --nocapture
|
||||
sudo prlimit --pid $$ --nofile=1048576:1048576
|
||||
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose -- --test-threads=1
|
||||
sudo chown -R $USER:$USER ./target
|
||||
sudo chown -R $USER:$USER ~/.cargo
|
||||
|
||||
463
Cargo.lock
generated
463
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -16,5 +16,5 @@ panic = "unwind"
|
||||
panic = "abort"
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
opt-level = 'z'
|
||||
opt-level = 3
|
||||
strip = true
|
||||
|
||||
174
LICENSE
174
LICENSE
@@ -1,73 +1,165 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
|
||||
This version of the GNU Lesser General Public License incorporates
|
||||
the terms and conditions of version 3 of the GNU General Public
|
||||
License, supplemented by the additional permissions listed below.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
|
||||
0. Additional Definitions.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
||||
General Public License.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
|
||||
"The Library" refers to a covered work governed by this License,
|
||||
other than an Application or a Combined Work as defined below.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
|
||||
An "Application" is any work that makes use of an interface provided
|
||||
by the Library, but which is not otherwise based on the Library.
|
||||
Defining a subclass of a class defined by the Library is deemed a mode
|
||||
of using an interface provided by the Library.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
|
||||
A "Combined Work" is a work produced by combining or linking an
|
||||
Application with the Library. The particular version of the Library
|
||||
with which the Combined Work was made is also called the "Linked
|
||||
Version".
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
|
||||
The "Minimal Corresponding Source" for a Combined Work means the
|
||||
Corresponding Source for the Combined Work, excluding any source code
|
||||
for portions of the Combined Work that, considered in isolation, are
|
||||
based on the Application, and not on the Linked Version.
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
The "Corresponding Application Code" for a Combined Work means the
|
||||
object code and/or source code for the Application, including any data
|
||||
and utility programs needed for reproducing the Combined Work from the
|
||||
Application, but excluding the System Libraries of the Combined Work.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
||||
1. Exception to Section 3 of the GNU GPL.
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
|
||||
You may convey a covered work under sections 3 and 4 of this License
|
||||
without being bound by section 3 of the GNU GPL.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
||||
2. Conveying Modified Versions.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
|
||||
If you modify a copy of the Library, and, in your modifications, a
|
||||
facility refers to a function or data to be supplied by an Application
|
||||
that uses the facility (other than as an argument passed when the
|
||||
facility is invoked), then you may convey a copy of the modified
|
||||
version:
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
|
||||
a) under this License, provided that you make a good faith effort to
|
||||
ensure that, in the event an Application does not supply the
|
||||
function or data, the facility still operates, and performs
|
||||
whatever part of its purpose remains meaningful, or
|
||||
|
||||
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
|
||||
b) under the GNU GPL, with none of the additional permissions of
|
||||
this License applicable to that copy.
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
|
||||
3. Object Code Incorporating Material from Library Header Files.
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
|
||||
The object code form of an Application may incorporate material from
|
||||
a header file that is part of the Library. You may convey such object
|
||||
code under terms of your choice, provided that, if the incorporated
|
||||
material is not limited to numerical parameters, data structure
|
||||
layouts and accessors, or small macros, inline functions and templates
|
||||
(ten or fewer lines in length), you do both of the following:
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
|
||||
a) Give prominent notice with each copy of the object code that the
|
||||
Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
|
||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
|
||||
4. Combined Works.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
|
||||
You may convey a Combined Work under terms of your choice that,
|
||||
taken together, effectively do not restrict modification of the
|
||||
portions of the Library contained in the Combined Work and reverse
|
||||
engineering for debugging such modifications, if you also do each of
|
||||
the following:
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
|
||||
a) Give prominent notice with each copy of the Combined Work that
|
||||
the Library is used in it and that the Library and its use are
|
||||
covered by this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
||||
document.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
||||
c) For a Combined Work that displays copyright notices during
|
||||
execution, include the copyright notice for the Library among
|
||||
these notices, as well as a reference directing the user to the
|
||||
copies of the GNU GPL and this license document.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
d) Do one of the following:
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
0) Convey the Minimal Corresponding Source under the terms of this
|
||||
License, and the Corresponding Application Code in a form
|
||||
suitable for, and under terms that permit, the user to
|
||||
recombine or relink the Application with a modified version of
|
||||
the Linked Version to produce a modified Combined Work, in the
|
||||
manner specified by section 6 of the GNU GPL for conveying
|
||||
Corresponding Source.
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
|
||||
1) Use a suitable shared library mechanism for linking with the
|
||||
Library. A suitable mechanism is one that (a) uses at run time
|
||||
a copy of the Library already present on the user's computer
|
||||
system, and (b) will operate properly with a modified version
|
||||
of the Library that is interface-compatible with the Linked
|
||||
Version.
|
||||
|
||||
Copyright 2023 sunsijie
|
||||
e) Provide Installation Information, but only if you would otherwise
|
||||
be required to provide such information under section 6 of the
|
||||
GNU GPL, and only to the extent that such information is
|
||||
necessary to install and execute a modified version of the
|
||||
Combined Work produced by recombining or relinking the
|
||||
Application with a modified version of the Linked Version. (If
|
||||
you use option 4d0, the Installation Information must accompany
|
||||
the Minimal Corresponding Source and Corresponding Application
|
||||
Code. If you use option 4d1, you must provide the Installation
|
||||
Information in the manner specified by section 6 of the GNU GPL
|
||||
for conveying Corresponding Source.)
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
5. Combined Libraries.
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
You may place library facilities that are a work based on the
|
||||
Library side by side in a single library together with other library
|
||||
facilities that are not Applications and are not covered by this
|
||||
License, and convey such a combined library under terms of your
|
||||
choice, if you do both of the following:
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
a) Accompany the combined library with a copy of the same work based
|
||||
on the Library, uncombined with any other library facilities,
|
||||
conveyed under the terms of this License.
|
||||
|
||||
b) Give prominent notice with the combined library that part of it
|
||||
is a work based on the Library, and explaining where to find the
|
||||
accompanying uncombined form of the same work.
|
||||
|
||||
6. Revised Versions of the GNU Lesser General Public License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions
|
||||
of the GNU Lesser General Public License from time to time. Such new
|
||||
versions will be similar in spirit to the present version, but may
|
||||
differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Library as you received it specifies that a certain numbered version
|
||||
of the GNU Lesser General Public License "or any later version"
|
||||
applies to it, you have the option of following the terms and
|
||||
conditions either of that published version or of any later version
|
||||
published by the Free Software Foundation. If the Library as you
|
||||
received it does not specify a version number of the GNU Lesser
|
||||
General Public License, you may choose any version of the GNU Lesser
|
||||
General Public License ever published by the Free Software Foundation.
|
||||
|
||||
If the Library as you received it specifies that a proxy can decide
|
||||
whether future versions of the GNU Lesser General Public License shall
|
||||
apply, that proxy's public statement of acceptance of any version is
|
||||
permanent authorization for you to choose that version for the
|
||||
Library.
|
||||
|
||||
@@ -99,7 +99,7 @@ EasyTier is a simple, safe and decentralized VPN networking solution implemented
|
||||
|
||||
```sh
|
||||
brew tap brewforge/chinese
|
||||
brew install --cask easytier
|
||||
brew install --cask easytier-gui
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
@@ -96,7 +96,7 @@
|
||||
|
||||
```sh
|
||||
brew tap brewforge/chinese
|
||||
brew install --cask easytier
|
||||
brew install --cask easytier-gui
|
||||
```
|
||||
|
||||
## 快速开始
|
||||
|
||||
@@ -14,3 +14,4 @@ dashmap = "6.0"
|
||||
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
uuid = "1.17.0"
|
||||
|
||||
@@ -3,11 +3,14 @@ use std::sync::Mutex;
|
||||
use dashmap::DashMap;
|
||||
use easytier::{
|
||||
common::config::{ConfigLoader as _, TomlConfigLoader},
|
||||
launcher::NetworkInstance,
|
||||
instance_manager::NetworkInstanceManager,
|
||||
launcher::ConfigSource,
|
||||
};
|
||||
|
||||
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> =
|
||||
static INSTANCE_NAME_ID_MAP: once_cell::sync::Lazy<DashMap<String, uuid::Uuid>> =
|
||||
once_cell::sync::Lazy::new(DashMap::new);
|
||||
static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> =
|
||||
once_cell::sync::Lazy::new(NetworkInstanceManager::new);
|
||||
|
||||
static ERROR_MSG: once_cell::sync::Lazy<Mutex<Vec<u8>>> =
|
||||
once_cell::sync::Lazy::new(|| Mutex::new(Vec::new()));
|
||||
@@ -86,18 +89,20 @@ pub extern "C" fn run_network_instance(cfg_str: *const std::ffi::c_char) -> std:
|
||||
|
||||
let inst_name = cfg.get_inst_name();
|
||||
|
||||
if INSTANCE_MAP.contains_key(&inst_name) {
|
||||
if INSTANCE_NAME_ID_MAP.contains_key(&inst_name) {
|
||||
set_error_msg("instance already exists");
|
||||
return -1;
|
||||
}
|
||||
|
||||
let mut instance = NetworkInstance::new(cfg);
|
||||
if let Err(e) = instance.start().map_err(|e| e.to_string()) {
|
||||
set_error_msg(&format!("failed to start instance: {}", e));
|
||||
return -1;
|
||||
}
|
||||
let instance_id = match INSTANCE_MANAGER.run_network_instance(cfg, ConfigSource::FFI) {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
set_error_msg(&format!("failed to start instance: {}", e));
|
||||
return -1;
|
||||
}
|
||||
};
|
||||
|
||||
INSTANCE_MAP.insert(inst_name, instance);
|
||||
INSTANCE_NAME_ID_MAP.insert(inst_name, instance_id);
|
||||
|
||||
0
|
||||
}
|
||||
@@ -108,7 +113,11 @@ pub extern "C" fn retain_network_instance(
|
||||
length: usize,
|
||||
) -> std::ffi::c_int {
|
||||
if length == 0 {
|
||||
INSTANCE_MAP.clear();
|
||||
if let Err(e) = INSTANCE_MANAGER.retain_network_instance(Vec::new()) {
|
||||
set_error_msg(&format!("failed to retain instances: {}", e));
|
||||
return -1;
|
||||
}
|
||||
INSTANCE_NAME_ID_MAP.clear();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -125,7 +134,17 @@ pub extern "C" fn retain_network_instance(
|
||||
.collect::<Vec<_>>()
|
||||
};
|
||||
|
||||
let _ = INSTANCE_MAP.retain(|k, _| inst_names.contains(k));
|
||||
let inst_ids: Vec<uuid::Uuid> = inst_names
|
||||
.iter()
|
||||
.filter_map(|name| INSTANCE_NAME_ID_MAP.get(name).map(|id| *id))
|
||||
.collect();
|
||||
|
||||
if let Err(e) = INSTANCE_MANAGER.retain_network_instance(inst_ids) {
|
||||
set_error_msg(&format!("failed to retain instances: {}", e));
|
||||
return -1;
|
||||
}
|
||||
|
||||
let _ = INSTANCE_NAME_ID_MAP.retain(|k, _| inst_names.contains(k));
|
||||
|
||||
0
|
||||
}
|
||||
@@ -144,13 +163,20 @@ pub extern "C" fn collect_network_infos(
|
||||
std::slice::from_raw_parts_mut(infos, max_length)
|
||||
};
|
||||
|
||||
let collected_infos = match INSTANCE_MANAGER.collect_network_infos() {
|
||||
Ok(infos) => infos,
|
||||
Err(e) => {
|
||||
set_error_msg(&format!("failed to collect network infos: {}", e));
|
||||
return -1;
|
||||
}
|
||||
};
|
||||
|
||||
let mut index = 0;
|
||||
for instance in INSTANCE_MAP.iter() {
|
||||
for (instance_id, value) in collected_infos.iter() {
|
||||
if index >= max_length {
|
||||
break;
|
||||
}
|
||||
let key = instance.key();
|
||||
let Some(value) = instance.get_running_info() else {
|
||||
let Some(key) = INSTANCE_MANAGER.get_network_instance_name(instance_id) else {
|
||||
continue;
|
||||
};
|
||||
// convert value to json string
|
||||
@@ -181,7 +207,6 @@ mod tests {
|
||||
let cfg_str = r#"
|
||||
inst_name = "test"
|
||||
network = "test_network"
|
||||
fdsafdsa
|
||||
"#;
|
||||
let cstr = std::ffi::CString::new(cfg_str).unwrap();
|
||||
assert_eq!(parse_config(cstr.as_ptr()), 0);
|
||||
|
||||
@@ -2,5 +2,5 @@
|
||||
magisk安装后重启
|
||||
|
||||
目录位置:/data/adb/modules/easytier_magisk
|
||||
配置文件位置://data/adb/modules/easytier_magisk/config/config.conf
|
||||
配置文件位置://data/adb/modules/easytier_magisk/config/config.toml
|
||||
修改config.conf即可,修改后配置文件后去magisk app重新开关模块即可生效
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
#!/data/adb/magisk/busybox sh
|
||||
MODDIR=${0%/*}
|
||||
echo 'Easytier 服务停止中....'
|
||||
|
||||
PIDS=$(pgrep -f "^${MODDIR}/easytier-core -c ${MODDIR}/config/config.conf")
|
||||
# 查找 easytier-core 进程的 PID
|
||||
PID=$(pgrep easytier-core)
|
||||
|
||||
if [ -n "$PIDS" ]; then
|
||||
kill $PIDS # 杀死所有匹配的进程
|
||||
echo "已停止所有 Easytier 进程 (PIDs: $PIDS)"
|
||||
# 检查是否找到了进程
|
||||
if [ -z "$PID" ]; then
|
||||
echo "easytier-core 进程未找到"
|
||||
else
|
||||
echo "Easytier 服务未运行"
|
||||
# 结束进程
|
||||
kill $PID
|
||||
echo "已结束 easytier-core 进程 (PID: $PID)"
|
||||
fi
|
||||
echo '重启服务中...'
|
||||
nohup sh ${MODDIR}/service.sh >> ${MODDIR}/log/start.log 2>&1 &
|
||||
echo '服务已重启'
|
||||
exit
|
||||
@@ -2,6 +2,6 @@ ui_print '安装完成'
|
||||
ui_print '当前架构为' + $ARCH
|
||||
ui_print '当前系统版本为' + $API
|
||||
ui_print '安装目录为: /data/adb/modules/easytier_magisk'
|
||||
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.conf'
|
||||
ui_print '配置文件位置: /data/adb/modules/easytier_magisk/config/config.toml'
|
||||
ui_print '修改后配置文件后在magisk app点击操作按钮即可生效'
|
||||
ui_print '记得重启'
|
||||
48
easytier-contrib/easytier-magisk/easytier_core.sh
Normal file
48
easytier-contrib/easytier-magisk/easytier_core.sh
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/system/bin/sh
|
||||
|
||||
MODDIR=${0%/*}
|
||||
CONFIG_FILE="${MODDIR}/config/config.toml"
|
||||
LOG_FILE="${MODDIR}/log.log"
|
||||
MODULE_PROP="${MODDIR}/module.prop"
|
||||
EASYTIER="${MODDIR}/easytier-core"
|
||||
|
||||
# 更新module.prop文件中的description
|
||||
update_module_description() {
|
||||
local status_message=$1
|
||||
sed -i "/^description=/c\description=[状态]${status_message}" ${MODULE_PROP}
|
||||
}
|
||||
|
||||
if [ ! -e /dev/net/tun ]; then
|
||||
if [ ! -d /dev/net ]; then
|
||||
mkdir -p /dev/net
|
||||
fi
|
||||
|
||||
ln -s /dev/tun /dev/net/tun
|
||||
fi
|
||||
|
||||
while true; do
|
||||
if ls $MODDIR | grep -q "disable"; then
|
||||
update_module_description "关闭中"
|
||||
if pgrep -f 'easytier-core' >/dev/null; then
|
||||
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在,正在关闭 ..."
|
||||
pkill easytier-core # 关闭进程
|
||||
fi
|
||||
else
|
||||
if ! pgrep -f 'easytier-core' >/dev/null; then
|
||||
if [ ! -f "$CONFIG_FILE" ]; then
|
||||
update_module_description "config.toml不存在"
|
||||
sleep 3s
|
||||
continue
|
||||
fi
|
||||
|
||||
TZ=Asia/Shanghai ${EASYTIER} -c ${CONFIG_FILE} > ${LOG_FILE} &
|
||||
sleep 5s # 等待easytier-core启动完成
|
||||
update_module_description "已开启(不一定运行成功)"
|
||||
ip rule add from all lookup main
|
||||
else
|
||||
echo "开关控制$(date "+%Y-%m-%d %H:%M:%S") 进程已存在"
|
||||
fi
|
||||
fi
|
||||
|
||||
sleep 3s # 暂停3秒后再次执行循环
|
||||
done
|
||||
@@ -1,7 +1,7 @@
|
||||
id=easytier_magisk
|
||||
name=easytier_magisk版
|
||||
version=v2.2.4
|
||||
name=EasyTier_Magisk
|
||||
version=v2.3.2
|
||||
versionCode=1
|
||||
author=EasyTier
|
||||
description=easytier_magisk版模块 作者:EasyTier https://github.com/EasyTier/EasyTier
|
||||
description=easytier magisk module @EasyTier(https://github.com/EasyTier/EasyTier)
|
||||
updateJson=https://raw.githubusercontent.com/EasyTier/EasyTier/refs/heads/main/easytier-contrib/easytier-magisk/magisk_update.json
|
||||
|
||||
@@ -1,20 +1,27 @@
|
||||
#!/data/adb/magisk/busybox sh
|
||||
MODDIR=${0%/*}
|
||||
# MODDIR="$(dirname $(readlink -f "$0"))"
|
||||
mkdir -p ${MODDIR}/log
|
||||
chmod 755 ${MODDIR}/*
|
||||
|
||||
echo $MODDIR >> ${MODDIR}/log/start.log
|
||||
# 等待系统启动成功
|
||||
while [ "$(getprop sys.boot_completed)" != "1" ]; do
|
||||
sleep 5s
|
||||
done
|
||||
|
||||
echo "Easytier 服务启动" >> ${MODDIR}/log/start.log
|
||||
# 防止系统挂起
|
||||
echo "PowerManagerService.noSuspend" > /sys/power/wake_lock
|
||||
|
||||
# 启动
|
||||
nohup ${MODDIR}/easytier-core -c ${MODDIR}/config/config.conf >> ${MODDIR}/log/start.log 2>&1 &
|
||||
# 修改模块描述
|
||||
sed -i 's/$(description=)$[^"]*/\1[状态]关闭中/' "$MODDIR/module.prop"
|
||||
|
||||
# 等待 3 秒
|
||||
sleep 3s
|
||||
|
||||
"${MODDIR}/easytier_core.sh" &
|
||||
|
||||
# 检查是否启用模块
|
||||
while [ ! -f ${MODDIR}/disable ]; do
|
||||
sleep 2
|
||||
done
|
||||
PID=$(ps -ef|grep "${MODDIR}/easytier-core -c ${MODDIR}/config/config.conf" | awk '{print $2}')
|
||||
kill $PID
|
||||
echo "Easytier 服务停止" >> ${MODDIR}/log/start.log
|
||||
|
||||
pkill easytier-core
|
||||
|
||||
2
easytier-contrib/easytier-magisk/system/etc/resolv.conf
Normal file
2
easytier-contrib/easytier-magisk/system/etc/resolv.conf
Normal file
@@ -0,0 +1,2 @@
|
||||
nameserver 114.114.114.114
|
||||
nameserver 223.5.5.5
|
||||
@@ -1,2 +1,3 @@
|
||||
MODDIR=${0%/*}
|
||||
pkill easytier-core # 结束 easytier-core 进程
|
||||
rm -rf $MODDIR/*
|
||||
@@ -18,7 +18,11 @@ cd ../tauri-plugin-vpnservice
|
||||
pnpm install
|
||||
pnpm build
|
||||
|
||||
cd ../easytier-gui
|
||||
cd ../easytier-web/frontend-lib
|
||||
pnpm install
|
||||
pnpm build
|
||||
|
||||
cd ../../easytier-gui
|
||||
pnpm install
|
||||
pnpm tauri build
|
||||
```
|
||||
|
||||
@@ -50,7 +50,11 @@ dev_name_placeholder: 注意:当多个网络同时使用相同的TUN接口名
|
||||
off_text: 点击关闭
|
||||
on_text: 点击开启
|
||||
show_config: 显示配置
|
||||
edit_config: 编辑配置文件
|
||||
close: 关闭
|
||||
save: 保存
|
||||
config_saved: 配置已保存
|
||||
|
||||
|
||||
use_latency_first: 延迟优先模式
|
||||
my_node_info: 当前节点信息
|
||||
|
||||
@@ -51,7 +51,10 @@ dev_name_placeholder: 'Note: When multiple networks use the same TUN interface n
|
||||
off_text: Press to disable
|
||||
on_text: Press to enable
|
||||
show_config: Show Config
|
||||
edit_config: Edit Config File
|
||||
close: Close
|
||||
save: Save
|
||||
config_saved: Configuration saved
|
||||
my_node_info: My Node Info
|
||||
peer_count: Connected
|
||||
upload: Upload
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "easytier-gui",
|
||||
"type": "module",
|
||||
"version": "2.2.4",
|
||||
"version": "2.3.2",
|
||||
"private": true,
|
||||
"packageManager": "pnpm@9.12.1+sha512.e5a7e52a4183a02d5931057f7a0dbff9d5e9ce3161e33fa68ae392125b79282a8a8a470a51dfc8a0ed86221442eb2fb57019b0990ed24fab519bf0e1bc5ccfc4",
|
||||
"scripts": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "easytier-gui"
|
||||
version = "2.3.0"
|
||||
version = "2.3.2"
|
||||
description = "EasyTier GUI"
|
||||
authors = ["you"]
|
||||
edition = "2021"
|
||||
@@ -53,6 +53,7 @@ tauri-plugin-positioner = { version = "2.0", features = ["tray-icon"] }
|
||||
tauri-plugin-vpnservice = { path = "../../tauri-plugin-vpnservice" }
|
||||
tauri-plugin-os = "2.0"
|
||||
tauri-plugin-autostart = "2.0"
|
||||
uuid = "1.17.0"
|
||||
|
||||
|
||||
[features]
|
||||
|
||||
@@ -3,10 +3,10 @@
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use easytier::{
|
||||
common::config::{ConfigLoader, FileLoggerConfig, TomlConfigLoader},
|
||||
launcher::{NetworkConfig, NetworkInstance, NetworkInstanceRunningInfo},
|
||||
common::config::{ConfigLoader, FileLoggerConfig, LoggingConfigBuilder, TomlConfigLoader},
|
||||
instance_manager::NetworkInstanceManager,
|
||||
launcher::{ConfigSource, NetworkConfig, NetworkInstanceRunningInfo},
|
||||
utils::{self, NewFilterSender},
|
||||
};
|
||||
|
||||
@@ -17,8 +17,8 @@ pub const AUTOSTART_ARG: &str = "--autostart";
|
||||
#[cfg(not(target_os = "android"))]
|
||||
use tauri::tray::{MouseButton, MouseButtonState, TrayIconBuilder, TrayIconEvent};
|
||||
|
||||
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> =
|
||||
once_cell::sync::Lazy::new(DashMap::new);
|
||||
static INSTANCE_MANAGER: once_cell::sync::Lazy<NetworkInstanceManager> =
|
||||
once_cell::sync::Lazy::new(NetworkInstanceManager::new);
|
||||
|
||||
static mut LOGGER_LEVEL_SENDER: once_cell::sync::Lazy<Option<NewFilterSender>> =
|
||||
once_cell::sync::Lazy::new(Default::default);
|
||||
@@ -42,43 +42,48 @@ fn parse_network_config(cfg: NetworkConfig) -> Result<String, String> {
|
||||
Ok(toml.dump())
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
fn generate_network_config(toml_config: String) -> Result<NetworkConfig, String> {
|
||||
let config = TomlConfigLoader::new_from_str(&toml_config).map_err(|e| e.to_string())?;
|
||||
let cfg = NetworkConfig::new_from_config(&config).map_err(|e| e.to_string())?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
fn run_network_instance(cfg: NetworkConfig) -> Result<(), String> {
|
||||
if INSTANCE_MAP.contains_key(cfg.instance_id()) {
|
||||
return Err("instance already exists".to_string());
|
||||
}
|
||||
let instance_id = cfg.instance_id().to_string();
|
||||
|
||||
let cfg = cfg.gen_config().map_err(|e| e.to_string())?;
|
||||
let mut instance = NetworkInstance::new(cfg);
|
||||
instance.start().map_err(|e| e.to_string())?;
|
||||
|
||||
INSTANCE_MANAGER
|
||||
.run_network_instance(cfg, ConfigSource::GUI)
|
||||
.map_err(|e| e.to_string())?;
|
||||
println!("instance {} started", instance_id);
|
||||
INSTANCE_MAP.insert(instance_id, instance);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
fn retain_network_instance(instance_ids: Vec<String>) -> Result<(), String> {
|
||||
let _ = INSTANCE_MAP.retain(|k, _| instance_ids.contains(k));
|
||||
println!(
|
||||
"instance {:?} retained",
|
||||
INSTANCE_MAP
|
||||
.iter()
|
||||
.map(|item| item.key().clone())
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
let instance_ids = instance_ids
|
||||
.into_iter()
|
||||
.filter_map(|id| uuid::Uuid::parse_str(&id).ok())
|
||||
.collect();
|
||||
let retained = INSTANCE_MANAGER
|
||||
.retain_network_instance(instance_ids)
|
||||
.map_err(|e| e.to_string())?;
|
||||
println!("instance {:?} retained", retained);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
fn collect_network_infos() -> Result<BTreeMap<String, NetworkInstanceRunningInfo>, String> {
|
||||
let infos = INSTANCE_MANAGER
|
||||
.collect_network_infos()
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
let mut ret = BTreeMap::new();
|
||||
for instance in INSTANCE_MAP.iter() {
|
||||
if let Some(info) = instance.get_running_info() {
|
||||
ret.insert(instance.key().clone(), info);
|
||||
}
|
||||
for (uuid, info) in infos {
|
||||
ret.insert(uuid.to_string(), info);
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
@@ -97,10 +102,10 @@ fn set_logging_level(level: String) -> Result<(), String> {
|
||||
|
||||
#[tauri::command]
|
||||
fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> {
|
||||
let mut instance = INSTANCE_MAP
|
||||
.get_mut(&instance_id)
|
||||
.ok_or("instance not found")?;
|
||||
instance.set_tun_fd(fd);
|
||||
let uuid = uuid::Uuid::parse_str(&instance_id).map_err(|e| e.to_string())?;
|
||||
INSTANCE_MANAGER
|
||||
.set_tun_fd(&uuid, fd)
|
||||
.map_err(|e| e.to_string())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -108,7 +113,12 @@ fn set_tun_fd(instance_id: String, fd: i32) -> Result<(), String> {
|
||||
fn toggle_window_visibility<R: tauri::Runtime>(app: &tauri::AppHandle<R>) {
|
||||
if let Some(window) = app.get_webview_window("main") {
|
||||
if window.is_visible().unwrap_or_default() {
|
||||
let _ = window.hide();
|
||||
if window.is_minimized().unwrap_or_default() {
|
||||
let _ = window.unminimize();
|
||||
let _ = window.set_focus();
|
||||
} else {
|
||||
let _ = window.hide();
|
||||
}
|
||||
} else {
|
||||
let _ = window.show();
|
||||
let _ = window.set_focus();
|
||||
@@ -180,13 +190,15 @@ pub fn run() {
|
||||
let Ok(log_dir) = app.path().app_log_dir() else {
|
||||
return Ok(());
|
||||
};
|
||||
let config = TomlConfigLoader::default();
|
||||
config.set_file_logger_config(FileLoggerConfig {
|
||||
dir: Some(log_dir.to_string_lossy().to_string()),
|
||||
level: None,
|
||||
file: None,
|
||||
});
|
||||
let Ok(Some(logger_reinit)) = utils::init_logger(config, true) else {
|
||||
let config = LoggingConfigBuilder::default()
|
||||
.file_logger(FileLoggerConfig {
|
||||
dir: Some(log_dir.to_string_lossy().to_string()),
|
||||
level: None,
|
||||
file: None,
|
||||
})
|
||||
.build()
|
||||
.map_err(|e| e.to_string())?;
|
||||
let Ok(Some(logger_reinit)) = utils::init_logger(&config, true) else {
|
||||
return Ok(());
|
||||
};
|
||||
#[allow(static_mut_refs)]
|
||||
@@ -219,6 +231,7 @@ pub fn run() {
|
||||
})
|
||||
.invoke_handler(tauri::generate_handler![
|
||||
parse_network_config,
|
||||
generate_network_config,
|
||||
run_network_instance,
|
||||
retain_network_instance,
|
||||
collect_network_infos,
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
"createUpdaterArtifacts": false
|
||||
},
|
||||
"productName": "easytier-gui",
|
||||
"version": "2.3.0",
|
||||
"version": "2.3.2",
|
||||
"identifier": "com.kkrainbow.easytier",
|
||||
"plugins": {},
|
||||
"app": {
|
||||
|
||||
@@ -8,5 +8,6 @@ onBeforeMount(async () => {
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<Toast position="bottom-right" />
|
||||
<RouterView />
|
||||
</template>
|
||||
|
||||
2
easytier-gui/src/auto-imports.d.ts
vendored
2
easytier-gui/src/auto-imports.d.ts
vendored
@@ -23,6 +23,7 @@ declare global {
|
||||
const effectScope: typeof import('vue')['effectScope']
|
||||
const event2human: typeof import('./composables/utils')['event2human']
|
||||
const generateMenuItem: typeof import('./composables/tray')['generateMenuItem']
|
||||
const generateNetworkConfig: typeof import('./composables/network')['generateNetworkConfig']
|
||||
const getActivePinia: typeof import('pinia')['getActivePinia']
|
||||
const getCurrentInstance: typeof import('vue')['getCurrentInstance']
|
||||
const getCurrentScope: typeof import('vue')['getCurrentScope']
|
||||
@@ -134,6 +135,7 @@ declare module 'vue' {
|
||||
readonly defineStore: UnwrapRef<typeof import('pinia')['defineStore']>
|
||||
readonly effectScope: UnwrapRef<typeof import('vue')['effectScope']>
|
||||
readonly generateMenuItem: UnwrapRef<typeof import('./composables/tray')['generateMenuItem']>
|
||||
readonly generateNetworkConfig: UnwrapRef<typeof import('./composables/network')['generateNetworkConfig']>
|
||||
readonly getActivePinia: UnwrapRef<typeof import('pinia')['getActivePinia']>
|
||||
readonly getCurrentInstance: UnwrapRef<typeof import('vue')['getCurrentInstance']>
|
||||
readonly getCurrentScope: UnwrapRef<typeof import('vue')['getCurrentScope']>
|
||||
|
||||
@@ -8,6 +8,10 @@ export async function parseNetworkConfig(cfg: NetworkConfig) {
|
||||
return invoke<string>('parse_network_config', { cfg })
|
||||
}
|
||||
|
||||
export async function generateNetworkConfig(tomlConfig: string) {
|
||||
return invoke<NetworkConfig>('generate_network_config', { tomlConfig })
|
||||
}
|
||||
|
||||
export async function runNetworkInstance(cfg: NetworkConfig) {
|
||||
return invoke('run_network_instance', { cfg })
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import { exit } from '@tauri-apps/plugin-process'
|
||||
import { open } from '@tauri-apps/plugin-shell'
|
||||
import TieredMenu from 'primevue/tieredmenu'
|
||||
import { useToast } from 'primevue/usetoast'
|
||||
import { NetworkTypes, Config, Status, Utils, I18nUtils } from 'easytier-frontend-lib'
|
||||
import { NetworkTypes, Config, Status, Utils, I18nUtils, ConfigEditDialog } from 'easytier-frontend-lib'
|
||||
|
||||
import { isAutostart, setLoggingLevel } from '~/composables/network'
|
||||
import { useTray } from '~/composables/tray'
|
||||
@@ -23,7 +23,7 @@ useTray(true)
|
||||
|
||||
const items = ref([
|
||||
{
|
||||
label: () => t('show_config'),
|
||||
label: () => activeStep.value == "2" ? t('show_config') : t('edit_config'),
|
||||
icon: 'pi pi-file-edit',
|
||||
command: async () => {
|
||||
try {
|
||||
@@ -262,6 +262,13 @@ onMounted(async () => {
|
||||
function isRunning(id: string) {
|
||||
return networkStore.networkInstanceIds.includes(id)
|
||||
}
|
||||
|
||||
async function saveTomlConfig(tomlConfig: string) {
|
||||
const config = await generateNetworkConfig(tomlConfig)
|
||||
networkStore.replaceCurNetwork(config);
|
||||
toast.add({ severity: 'success', detail: t('config_saved'), life: 3000 })
|
||||
visible.value = false
|
||||
}
|
||||
</script>
|
||||
|
||||
<script lang="ts">
|
||||
@@ -269,17 +276,8 @@ function isRunning(id: string) {
|
||||
|
||||
<template>
|
||||
<div id="root" class="flex flex-col">
|
||||
<Dialog v-model:visible="visible" modal header="Config File" :style="{ width: '70%' }">
|
||||
<Panel>
|
||||
<ScrollPanel style="width: 100%; height: 300px">
|
||||
<pre>{{ tomlConfig }}</pre>
|
||||
</ScrollPanel>
|
||||
</Panel>
|
||||
<Divider />
|
||||
<div class="flex gap-2 justify-end">
|
||||
<Button type="button" :label="t('close')" @click="visible = false" />
|
||||
</div>
|
||||
</Dialog>
|
||||
<ConfigEditDialog v-model:visible="visible" :cur-network="curNetworkConfig" :readonly="activeStep !== '1'"
|
||||
:save-config="saveTomlConfig" :generate-config="parseNetworkConfig" />
|
||||
|
||||
<Dialog v-model:visible="aboutVisible" modal :header="t('about.title')" :style="{ width: '70%' }">
|
||||
<About />
|
||||
|
||||
@@ -48,6 +48,12 @@ export const useNetworkStore = defineStore('networkStore', {
|
||||
this.curNetwork = this.networkList[nextCurNetworkIdx]
|
||||
},
|
||||
|
||||
replaceCurNetwork(cfg: NetworkTypes.NetworkConfig) {
|
||||
const curNetworkIdx = this.networkList.indexOf(this.curNetwork)
|
||||
this.networkList[curNetworkIdx] = cfg
|
||||
this.curNetwork = cfg
|
||||
},
|
||||
|
||||
removeNetworkInstance(instanceId: string) {
|
||||
delete this.instances[instanceId]
|
||||
},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "easytier-web"
|
||||
version = "2.3.0"
|
||||
version = "2.3.2"
|
||||
edition = "2021"
|
||||
description = "Config server for easytier. easytier-core gets config from this and web frontend use it as restful api server."
|
||||
|
||||
|
||||
@@ -147,6 +147,8 @@ const bool_flags: BoolFlag[] = [
|
||||
{ field: 'use_smoltcp', help: 'use_smoltcp_help' },
|
||||
{ field: 'enable_kcp_proxy', help: 'enable_kcp_proxy_help' },
|
||||
{ field: 'disable_kcp_input', help: 'disable_kcp_input_help' },
|
||||
{ field: 'enable_quic_proxy', help: 'enable_quic_proxy_help' },
|
||||
{ field: 'disable_quic_input', help: 'disable_quic_input_help' },
|
||||
{ field: 'disable_p2p', help: 'disable_p2p_help' },
|
||||
{ field: 'bind_device', help: 'bind_device_help' },
|
||||
{ field: 'no_tun', help: 'no_tun_help' },
|
||||
@@ -157,6 +159,7 @@ const bool_flags: BoolFlag[] = [
|
||||
{ field: 'disable_encryption', help: 'disable_encryption_help' },
|
||||
{ field: 'disable_udp_hole_punching', help: 'disable_udp_hole_punching_help' },
|
||||
{ field: 'enable_magic_dns', help: 'enable_magic_dns_help' },
|
||||
{ field: 'enable_private_mode', help: 'enable_private_mode_help' },
|
||||
]
|
||||
|
||||
</script>
|
||||
@@ -199,7 +202,7 @@ const bool_flags: BoolFlag[] = [
|
||||
<div class="flex flex-col gap-2 basis-5/12 grow">
|
||||
<label for="network_secret">{{ t('network_secret') }}</label>
|
||||
<Password id="network_secret" v-model="curNetwork.network_secret"
|
||||
aria-describedby="network_secret-help" toggleMask :feedback="false"/>
|
||||
aria-describedby="network_secret-help" toggleMask :feedback="false" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -270,7 +273,7 @@ const bool_flags: BoolFlag[] = [
|
||||
<div class="flex flex-col gap-2 basis-8/12 grow">
|
||||
<InputGroup>
|
||||
<InputText v-model="curNetwork.vpn_portal_client_network_addr"
|
||||
:placeholder="t('vpn_portal_client_network')" />
|
||||
:placeholder="t('vpn_portal_client_network')" />
|
||||
<InputGroupAddon>
|
||||
<span>/{{ curNetwork.vpn_portal_client_network_len }}</span>
|
||||
</InputGroupAddon>
|
||||
@@ -278,7 +281,7 @@ const bool_flags: BoolFlag[] = [
|
||||
</div>
|
||||
<div class="flex flex-col gap-2 basis-3/12 grow">
|
||||
<InputNumber v-model="curNetwork.vpn_portal_listen_port" :allow-empty="false" :format="false"
|
||||
:min="0" :max="65535" fluid />
|
||||
:min="0" :max="65535" fluid />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -303,6 +306,15 @@ const bool_flags: BoolFlag[] = [
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-row gap-x-9 flex-wrap w-full">
|
||||
<div class="flex flex-col gap-2 grow p-fluid">
|
||||
<label for="">{{ t('rpc_portal_whitelists') }}</label>
|
||||
<AutoComplete id="rpc_portal_whitelists" v-model="curNetwork.rpc_portal_whitelists"
|
||||
:placeholder="t('chips_placeholder', ['127.0.0.0/8'])" class="w-full" multiple fluid
|
||||
:suggestions="inetSuggestions" @complete="searchInetSuggestions" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="flex flex-row gap-x-9 flex-wrap">
|
||||
<div class="flex flex-col gap-2 basis-5/12 grow">
|
||||
<label for="dev_name">{{ t('dev_name') }}</label>
|
||||
@@ -315,11 +327,10 @@ const bool_flags: BoolFlag[] = [
|
||||
<div class="flex flex-col gap-2 basis-5/12 grow">
|
||||
<div class="flex">
|
||||
<label for="mtu">{{ t('mtu') }}</label>
|
||||
<span class="pi pi-question-circle ml-2 self-center"
|
||||
v-tooltip="t('mtu_help')"></span>
|
||||
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mtu_help')"></span>
|
||||
</div>
|
||||
<InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help"
|
||||
:format="false" :placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid/>
|
||||
<InputNumber id="mtu" v-model="curNetwork.mtu" aria-describedby="mtu-help" :format="false"
|
||||
:placeholder="t('mtu_placeholder')" :min="400" :max="1380" fluid />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -328,15 +339,15 @@ const bool_flags: BoolFlag[] = [
|
||||
<div class="flex">
|
||||
<label for="relay_network_whitelist">{{ t('relay_network_whitelist') }}</label>
|
||||
<span class="pi pi-question-circle ml-2 self-center"
|
||||
v-tooltip="t('relay_network_whitelist_help')"></span>
|
||||
v-tooltip="t('relay_network_whitelist_help')"></span>
|
||||
</div>
|
||||
<ToggleButton v-model="curNetwork.enable_relay_network_whitelist" on-icon="pi pi-check" off-icon="pi pi-times"
|
||||
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
|
||||
<ToggleButton v-model="curNetwork.enable_relay_network_whitelist" on-icon="pi pi-check"
|
||||
off-icon="pi pi-times" :on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
|
||||
<div v-if="curNetwork.enable_relay_network_whitelist" class="items-center flex flex-row gap-x-4">
|
||||
<div class="min-w-64 w-full">
|
||||
<AutoComplete id="relay_network_whitelist" v-model="curNetwork.relay_network_whitelist"
|
||||
:placeholder="t('relay_network_whitelist')" class="w-full" multiple fluid
|
||||
:suggestions="whitelistSuggestions" @complete="searchWhitelistSuggestions" />
|
||||
:placeholder="t('relay_network_whitelist')" class="w-full" multiple fluid
|
||||
:suggestions="whitelistSuggestions" @complete="searchWhitelistSuggestions" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -349,12 +360,12 @@ const bool_flags: BoolFlag[] = [
|
||||
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('manual_routes_help')"></span>
|
||||
</div>
|
||||
<ToggleButton v-model="curNetwork.enable_manual_routes" on-icon="pi pi-check" off-icon="pi pi-times"
|
||||
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
|
||||
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
|
||||
<div v-if="curNetwork.enable_manual_routes" class="items-center flex flex-row gap-x-4">
|
||||
<div class="min-w-64 w-full">
|
||||
<AutoComplete id="routes" v-model="curNetwork.routes"
|
||||
:placeholder="t('chips_placeholder', ['192.168.0.0/16'])" class="w-full" multiple fluid
|
||||
:suggestions="inetSuggestions" @complete="searchInetSuggestions" />
|
||||
:placeholder="t('chips_placeholder', ['192.168.0.0/16'])" class="w-full" multiple fluid
|
||||
:suggestions="inetSuggestions" @complete="searchInetSuggestions" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -367,11 +378,11 @@ const bool_flags: BoolFlag[] = [
|
||||
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('socks5_help')"></span>
|
||||
</div>
|
||||
<ToggleButton v-model="curNetwork.enable_socks5" on-icon="pi pi-check" off-icon="pi pi-times"
|
||||
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
|
||||
:on-label="t('off_text')" :off-label="t('on_text')" class="w-48" />
|
||||
<div v-if="curNetwork.enable_socks5" class="items-center flex flex-row gap-x-4">
|
||||
<div class="min-w-64 w-full">
|
||||
<InputNumber id="socks5_port" v-model="curNetwork.socks5_port" aria-describedby="rpc_port-help"
|
||||
:format="false" :allow-empty="false" :min="0" :max="65535" class="w-full"/>
|
||||
:format="false" :allow-empty="false" :min="0" :max="65535" class="w-full" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -384,8 +395,8 @@ const bool_flags: BoolFlag[] = [
|
||||
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('exit_nodes_help')"></span>
|
||||
</div>
|
||||
<AutoComplete id="exit_nodes" v-model="curNetwork.exit_nodes"
|
||||
:placeholder="t('chips_placeholder', ['192.168.8.8'])" class="w-full" multiple fluid
|
||||
:suggestions="exitNodesSuggestions" @complete="searchExitNodesSuggestions" />
|
||||
:placeholder="t('chips_placeholder', ['192.168.8.8'])" class="w-full" multiple fluid
|
||||
:suggestions="exitNodesSuggestions" @complete="searchExitNodesSuggestions" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -396,8 +407,8 @@ const bool_flags: BoolFlag[] = [
|
||||
<span class="pi pi-question-circle ml-2 self-center" v-tooltip="t('mapped_listeners_help')"></span>
|
||||
</div>
|
||||
<AutoComplete id="mapped_listeners" v-model="curNetwork.mapped_listeners"
|
||||
:placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full"
|
||||
multiple fluid :suggestions="peerSuggestions" @complete="searchPeerSuggestions" />
|
||||
:placeholder="t('chips_placeholder', ['tcp://123.123.123.123:11223'])" class="w-full" multiple fluid
|
||||
:suggestions="peerSuggestions" @complete="searchPeerSuggestions" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
103
easytier-web/frontend-lib/src/components/ConfigEditDialog.vue
Normal file
103
easytier-web/frontend-lib/src/components/ConfigEditDialog.vue
Normal file
@@ -0,0 +1,103 @@
|
||||
<script setup lang="ts">
|
||||
import { onMounted, ref, watch } from 'vue';
|
||||
import { NetworkConfig } from '../types/network';
|
||||
import { Divider, Button, Dialog, Textarea } from 'primevue'
|
||||
import { useI18n } from 'vue-i18n'
|
||||
|
||||
const { t } = useI18n()
|
||||
|
||||
const props = defineProps({
|
||||
readonly: {
|
||||
type: Boolean,
|
||||
default: false,
|
||||
},
|
||||
generateConfig: {
|
||||
type: Object as () => (config: NetworkConfig) => Promise<string>,
|
||||
required: true,
|
||||
},
|
||||
saveConfig: {
|
||||
type: Object as () => (config: string) => Promise<void>,
|
||||
required: true,
|
||||
},
|
||||
})
|
||||
|
||||
const curNetwork = defineModel('curNetwork', {
|
||||
type: Object as () => NetworkConfig | undefined,
|
||||
required: true,
|
||||
})
|
||||
|
||||
const visible = defineModel('visible', {
|
||||
type: Boolean,
|
||||
default: false,
|
||||
})
|
||||
watch([visible, curNetwork], async ([newVisible, newCurNetwork]) => {
|
||||
if (!newVisible) {
|
||||
tomlConfig.value = '';
|
||||
return;
|
||||
}
|
||||
if (!newCurNetwork) {
|
||||
tomlConfig.value = '';
|
||||
return;
|
||||
}
|
||||
const config = newCurNetwork;
|
||||
try {
|
||||
errorMessage.value = '';
|
||||
tomlConfig.value = await props.generateConfig(config);
|
||||
} catch (e) {
|
||||
errorMessage.value = 'Failed to generate config: ' + (e instanceof Error ? e.message : String(e));
|
||||
tomlConfig.value = '';
|
||||
}
|
||||
})
|
||||
onMounted(async () => {
|
||||
if (!visible.value) {
|
||||
return;
|
||||
}
|
||||
if (!curNetwork.value) {
|
||||
tomlConfig.value = '';
|
||||
return;
|
||||
}
|
||||
const config = curNetwork.value;
|
||||
try {
|
||||
tomlConfig.value = await props.generateConfig(config);
|
||||
errorMessage.value = '';
|
||||
} catch (e) {
|
||||
errorMessage.value = 'Failed to generate config: ' + (e instanceof Error ? e.message : String(e));
|
||||
tomlConfig.value = '';
|
||||
}
|
||||
});
|
||||
|
||||
const handleConfigSave = async () => {
|
||||
if (props.readonly) return;
|
||||
try {
|
||||
await props.saveConfig(tomlConfig.value);
|
||||
visible.value = false;
|
||||
} catch (e) {
|
||||
errorMessage.value = 'Failed to save config: ' + (e instanceof Error ? e.message : String(e));
|
||||
}
|
||||
};
|
||||
|
||||
const tomlConfig = ref<string>('')
|
||||
const tomlConfigRows = ref<number>(1);
|
||||
const errorMessage = ref<string>('');
|
||||
|
||||
watch(tomlConfig, (newValue) => {
|
||||
tomlConfigRows.value = newValue.split('\n').length;
|
||||
errorMessage.value = '';
|
||||
});
|
||||
|
||||
</script>
|
||||
<template>
|
||||
<Dialog v-model:visible="visible" modal :header="t('config_file')" :style="{ width: '70%' }">
|
||||
<pre v-if="errorMessage"
|
||||
class="mb-2 p-2 rounded text-sm overflow-auto bg-red-100 text-red-700 max-h-40">{{ errorMessage }}</pre>
|
||||
<div class="flex w-full" style="max-height: 60vh; overflow-y: auto;">
|
||||
<Textarea v-model="tomlConfig" class="w-full h-full font-mono flex flex-col resize-none" :rows="tomlConfigRows"
|
||||
spellcheck="false" :readonly="props.readonly"></Textarea>
|
||||
</div>
|
||||
<Divider />
|
||||
<div class="flex gap-2 justify-end">
|
||||
<Button v-if="!props.readonly" type="button" :label="t('save')" @click="handleConfigSave" />
|
||||
<Button type="button" :label="t('close')" @click="visible = false" />
|
||||
</div>
|
||||
</Dialog>
|
||||
</template>
|
||||
@@ -106,6 +106,10 @@ function ipFormat(info: PeerRoutePair) {
|
||||
return ip ? `${IPv4.fromNumber(ip.address.addr)}/${ip.network_length}` : ''
|
||||
}
|
||||
|
||||
function tunnelProto(info: PeerRoutePair) {
|
||||
return [...new Set(info.peer?.conns.map(c => c.tunnel?.tunnel_type))].join(',')
|
||||
}
|
||||
|
||||
const myNodeInfo = computed(() => {
|
||||
if (!props.curNetworkInst)
|
||||
return {} as NodeInfo
|
||||
@@ -311,7 +315,7 @@ function showEventLogs() {
|
||||
<Timeline v-else :value="dialogContent">
|
||||
<template #opposite="slotProps">
|
||||
<small class="text-surface-500 dark:text-surface-400">{{ useTimeAgo(Date.parse(slotProps.item.time))
|
||||
}}</small>
|
||||
}}</small>
|
||||
</template>
|
||||
<template #content="slotProps">
|
||||
<HumanEvent :event="slotProps.item.event" />
|
||||
@@ -408,6 +412,7 @@ function showEventLogs() {
|
||||
</template>
|
||||
</Column>
|
||||
<Column :field="routeCost" :header="t('route_cost')" />
|
||||
<Column :field="tunnelProto" :header="t('tunnel_proto')" />
|
||||
<Column :field="latencyMs" :header="t('latency')" />
|
||||
<Column :field="txBytes" :header="t('upload_bytes')" />
|
||||
<Column :field="rxBytes" :header="t('download_bytes')" />
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
export { default as Config } from './Config.vue';
|
||||
export { default as Status } from './Status.vue';
|
||||
export { default as ConfigEditDialog } from './ConfigEditDialog.vue';
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import './style.css'
|
||||
|
||||
import type { App } from 'vue';
|
||||
import { Config, Status } from "./components";
|
||||
import { Config, Status, ConfigEditDialog } from "./components";
|
||||
import Aura from '@primevue/themes/aura'
|
||||
import PrimeVue from 'primevue/config'
|
||||
|
||||
@@ -41,10 +41,11 @@ export default {
|
||||
});
|
||||
|
||||
app.component('Config', Config);
|
||||
app.component('ConfigEditDialog', ConfigEditDialog);
|
||||
app.component('Status', Status);
|
||||
app.component('HumanEvent', HumanEvent);
|
||||
app.directive('tooltip', vTooltip as any);
|
||||
}
|
||||
};
|
||||
|
||||
export { Config, Status, I18nUtils, NetworkTypes, Api, Utils };
|
||||
export { Config, ConfigEditDialog, Status, I18nUtils, NetworkTypes, Api, Utils };
|
||||
|
||||
@@ -18,6 +18,7 @@ advanced_settings: 高级设置
|
||||
basic_settings: 基础设置
|
||||
listener_urls: 监听地址
|
||||
rpc_port: RPC端口
|
||||
rpc_portal_whitelists: RPC白名单
|
||||
config_network: 配置网络
|
||||
running: 运行中
|
||||
error_msg: 错误信息
|
||||
@@ -50,7 +51,11 @@ dev_name_placeholder: 注意:当多个网络同时使用相同的TUN接口名
|
||||
off_text: 点击关闭
|
||||
on_text: 点击开启
|
||||
show_config: 显示配置
|
||||
edit_config: 编辑配置文件
|
||||
config_file: 配置文件
|
||||
close: 关闭
|
||||
save: 保存
|
||||
config_saved: 配置已保存
|
||||
|
||||
use_latency_first: 延迟优先模式
|
||||
my_node_info: 当前节点信息
|
||||
@@ -64,6 +69,7 @@ event_log: 事件日志
|
||||
peer_info: 节点信息
|
||||
hostname: 主机名
|
||||
route_cost: 路由
|
||||
tunnel_proto: 协议
|
||||
latency: 延迟
|
||||
upload_bytes: 上传
|
||||
download_bytes: 下载
|
||||
@@ -83,6 +89,12 @@ enable_kcp_proxy_help: 将 TCP 流量转为 KCP 流量,降低传输延迟,
|
||||
disable_kcp_input: 禁用 KCP 输入
|
||||
disable_kcp_input_help: 禁用 KCP 入站流量,其他开启 KCP 代理的节点仍然使用 TCP 连接到本节点。
|
||||
|
||||
enable_quic_proxy: 启用 QUIC 代理
|
||||
enable_quic_proxy_help: 将 TCP 流量转为 QUIC 流量,降低传输延迟,提升传输速度。
|
||||
|
||||
disable_quic_input: 禁用 QUIC 输入
|
||||
disable_quic_input_help: 禁用 QUIC 入站流量,其他开启 QUIC 代理的节点仍然使用 TCP 连接到本节点。
|
||||
|
||||
disable_p2p: 禁用 P2P
|
||||
disable_p2p_help: 禁用 P2P 模式,所有流量通过手动指定的服务器中转。
|
||||
|
||||
@@ -116,6 +128,10 @@ enable_magic_dns: 启用魔法DNS
|
||||
enable_magic_dns_help: |
|
||||
启用魔法DNS,允许通过EasyTier的DNS服务器访问其他节点的虚拟IPv4地址, 如 node1.et.net。
|
||||
|
||||
enable_private_mode: 启用私有模式
|
||||
enable_private_mode_help: |
|
||||
启用私有模式,则不允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转。
|
||||
|
||||
relay_network_whitelist: 网络白名单
|
||||
relay_network_whitelist_help: |
|
||||
仅转发白名单网络的流量,支持通配符字符串。多个网络名称间可以使用英文空格间隔。
|
||||
|
||||
@@ -18,6 +18,7 @@ advanced_settings: Advanced Settings
|
||||
basic_settings: Basic Settings
|
||||
listener_urls: Listener URLs
|
||||
rpc_port: RPC Port
|
||||
rpc_portal_whitelists: RPC Whitelist
|
||||
config_network: Config Network
|
||||
running: Running
|
||||
error_msg: Error Message
|
||||
@@ -51,7 +52,11 @@ dev_name_placeholder: 'Note: When multiple networks use the same TUN interface n
|
||||
off_text: Press to disable
|
||||
on_text: Press to enable
|
||||
show_config: Show Config
|
||||
edit_config: Edit Config File
|
||||
config_file: Config File
|
||||
close: Close
|
||||
save: Save
|
||||
config_saved: Configuration saved
|
||||
my_node_info: My Node Info
|
||||
peer_count: Connected
|
||||
upload: Upload
|
||||
@@ -62,6 +67,7 @@ show_event_log: Show Event Log
|
||||
event_log: Event Log
|
||||
peer_info: Peer Info
|
||||
route_cost: Route Cost
|
||||
tunnel_proto: Protocol
|
||||
hostname: Hostname
|
||||
latency: Latency
|
||||
upload_bytes: Upload
|
||||
@@ -82,6 +88,12 @@ enable_kcp_proxy_help: Convert TCP traffic to KCP traffic to reduce latency and
|
||||
disable_kcp_input: Disable KCP Input
|
||||
disable_kcp_input_help: Disable inbound KCP traffic, while nodes with KCP proxy enabled continue to connect using TCP.
|
||||
|
||||
enable_quic_proxy: Enable QUIC Proxy
|
||||
enable_quic_proxy_help: Convert TCP traffic to QUIC traffic to reduce latency and boost transmission speed.
|
||||
|
||||
disable_quic_input: Disable QUIC Input
|
||||
disable_quic_input_help: Disable inbound QUIC traffic, while nodes with QUIC proxy enabled continue to connect using TCP.
|
||||
|
||||
disable_p2p: Disable P2P
|
||||
disable_p2p_help: Disable P2P mode; route all traffic through a manually specified relay server.
|
||||
|
||||
@@ -115,6 +127,10 @@ enable_magic_dns: Enable Magic DNS
|
||||
enable_magic_dns_help: |
|
||||
Enable magic dns, all nodes in the network can access each other by domain name, e.g.: node1.et.net.
|
||||
|
||||
enable_private_mode: Enable Private Mode
|
||||
enable_private_mode_help: |
|
||||
Enable private mode, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node.
|
||||
|
||||
relay_network_whitelist: Network Whitelist
|
||||
relay_network_whitelist_help: |
|
||||
Only forward traffic from the whitelist networks, supporting wildcard strings, multiple network names can be separated by spaces.
|
||||
|
||||
@@ -47,6 +47,15 @@ export interface GenerateConfigResponse {
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export interface ParseConfigRequest {
|
||||
toml_config: string;
|
||||
}
|
||||
|
||||
export interface ParseConfigResponse {
|
||||
config?: NetworkConfig;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export class ApiClient {
|
||||
private client: AxiosInstance;
|
||||
private authFailedCb: Function | undefined;
|
||||
@@ -215,6 +224,18 @@ export class ApiClient {
|
||||
return { error: 'Unknown error: ' + error };
|
||||
}
|
||||
}
|
||||
|
||||
public async parse_config(config: ParseConfigRequest): Promise<ParseConfigResponse> {
|
||||
try {
|
||||
const response = await this.client.post<any, ParseConfigResponse>('/parse-config', config);
|
||||
return response;
|
||||
} catch (error) {
|
||||
if (error instanceof AxiosError) {
|
||||
return { error: error.response?.data };
|
||||
}
|
||||
return { error: 'Unknown error: ' + error };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export default ApiClient;
|
||||
@@ -39,6 +39,8 @@ export interface NetworkConfig {
|
||||
use_smoltcp?: boolean
|
||||
enable_kcp_proxy?: boolean
|
||||
disable_kcp_input?: boolean
|
||||
enable_quic_proxy?: boolean
|
||||
disable_quic_input?: boolean
|
||||
disable_p2p?: boolean
|
||||
bind_device?: boolean
|
||||
no_tun?: boolean
|
||||
@@ -64,6 +66,9 @@ export interface NetworkConfig {
|
||||
mapped_listeners: string[]
|
||||
|
||||
enable_magic_dns?: boolean
|
||||
enable_private_mode?: boolean
|
||||
|
||||
rpc_portal_whitelists: string[]
|
||||
}
|
||||
|
||||
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
|
||||
@@ -102,6 +107,8 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
|
||||
use_smoltcp: false,
|
||||
enable_kcp_proxy: false,
|
||||
disable_kcp_input: false,
|
||||
enable_quic_proxy: false,
|
||||
disable_quic_input: false,
|
||||
disable_p2p: false,
|
||||
bind_device: true,
|
||||
no_tun: false,
|
||||
@@ -121,6 +128,8 @@ export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
|
||||
mtu: null,
|
||||
mapped_listeners: [],
|
||||
enable_magic_dns: false,
|
||||
enable_private_mode: false,
|
||||
rpc_portal_whitelists: [],
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
<link rel="icon" type="image/png" href="/easytier.png" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>EasyTier Dashboard</title>
|
||||
<script src="/api_meta.js"></script>
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
|
||||
@@ -2,12 +2,11 @@
|
||||
import { NetworkTypes } from 'easytier-frontend-lib';
|
||||
import {computed, ref} from 'vue';
|
||||
import { Api } from 'easytier-frontend-lib'
|
||||
import {AutoComplete, Divider} from "primevue";
|
||||
import {AutoComplete, Divider, Button, Textarea} from "primevue";
|
||||
import {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost} from "../modules/api-host"
|
||||
|
||||
const api = computed<Api.ApiClient>(() => new Api.ApiClient(apiHost.value));
|
||||
|
||||
|
||||
const apiHost = ref<string>(getInitialApiHost())
|
||||
const apiHostSuggestions = ref<Array<string>>([])
|
||||
const apiHostSearch = async (event: { query: string }) => {
|
||||
@@ -22,23 +21,46 @@ const apiHostSearch = async (event: { query: string }) => {
|
||||
}
|
||||
|
||||
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
|
||||
const toml_config = ref<string>("Press 'Run Network' to generate TOML configuration");
|
||||
const toml_config = ref<string>("");
|
||||
const errorMessage = ref<string>("");
|
||||
|
||||
const generateConfig = (config: NetworkTypes.NetworkConfig) => {
|
||||
saveApiHost(apiHost.value)
|
||||
errorMessage.value = "";
|
||||
api.value?.generate_config({
|
||||
config: config
|
||||
}).then((res) => {
|
||||
if (res.error) {
|
||||
toml_config.value = res.error;
|
||||
errorMessage.value = "Generation failed: " + res.error;
|
||||
} else if (res.toml_config) {
|
||||
toml_config.value = res.toml_config;
|
||||
} else {
|
||||
toml_config.value = "Api server returned an unexpected response";
|
||||
errorMessage.value = "Api server returned an unexpected response";
|
||||
}
|
||||
}).catch(err => {
|
||||
errorMessage.value = "Generate request failed: " + (err instanceof Error ? err.message : String(err));
|
||||
});
|
||||
};
|
||||
|
||||
const parseConfig = async () => {
|
||||
try {
|
||||
errorMessage.value = "";
|
||||
const res = await api.value?.parse_config({
|
||||
toml_config: toml_config.value
|
||||
});
|
||||
|
||||
if (res.error) {
|
||||
errorMessage.value = "Parse failed: " + res.error;
|
||||
} else if (res.config) {
|
||||
newNetworkConfig.value = res.config;
|
||||
} else {
|
||||
errorMessage.value = "API returned an unexpected response";
|
||||
}
|
||||
} catch (e) {
|
||||
errorMessage.value = "Parse request failed: " + (e instanceof Error ? e.message : String(e));
|
||||
}
|
||||
};
|
||||
|
||||
</script>
|
||||
|
||||
<template>
|
||||
@@ -55,8 +77,17 @@ const generateConfig = (config: NetworkTypes.NetworkConfig) => {
|
||||
</div>
|
||||
<Config :cur-network="newNetworkConfig" @run-network="generateConfig" />
|
||||
</div>
|
||||
<div class="sm:w-full md:w-1/2 p-4 bg-gray-100">
|
||||
<pre class="whitespace-pre-wrap">{{ toml_config }}</pre>
|
||||
<div class="sm:w-full md:w-1/2 p-4 flex flex-col h-[calc(100vh-80px)]">
|
||||
<pre v-if="errorMessage" class="mb-2 p-2 rounded text-sm overflow-auto bg-red-100 text-red-700 max-h-40">{{ errorMessage }}</pre>
|
||||
<Textarea
|
||||
v-model="toml_config"
|
||||
spellcheck="false"
|
||||
class="w-full flex-grow p-2 bg-gray-100 whitespace-pre-wrap font-mono border-none focus:outline-none resize-none"
|
||||
placeholder="Press 'Run Network' to generate TOML configuration, or paste your TOML configuration here to parse it"
|
||||
></Textarea>
|
||||
<div class="mt-3 flex justify-center">
|
||||
<Button label="Parse Config" icon="pi pi-arrow-left" icon-pos="left" @click="parseConfig" />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
<script setup lang="ts">
|
||||
import {Toolbar, IftaLabel, Select, Button, ConfirmPopup, Dialog, useConfirm, useToast, Divider} from 'primevue';
|
||||
import { NetworkTypes, Status, Utils, Api, } from 'easytier-frontend-lib';
|
||||
import { Toolbar, IftaLabel, Select, Button, ConfirmPopup, Dialog, useConfirm, useToast, Divider } from 'primevue';
|
||||
import { NetworkTypes, Status, Utils, Api, ConfigEditDialog } from 'easytier-frontend-lib';
|
||||
import { watch, computed, onMounted, onUnmounted, ref } from 'vue';
|
||||
import { useRoute, useRouter } from 'vue-router';
|
||||
|
||||
@@ -33,6 +33,7 @@ const curNetworkInfo = ref<NetworkTypes.NetworkInstance | null>(null);
|
||||
|
||||
const isEditing = ref(false);
|
||||
const showCreateNetworkDialog = ref(false);
|
||||
const showConfigEditDialog = ref(false);
|
||||
const newNetworkConfig = ref<NetworkTypes.NetworkConfig>(NetworkTypes.DEFAULT_NETWORK_CONFIG());
|
||||
|
||||
const listInstanceIdResponse = ref<Api.ListNetworkInstanceIdResponse | undefined>(undefined);
|
||||
@@ -103,7 +104,12 @@ const updateNetworkState = async (disabled: boolean) => {
|
||||
return;
|
||||
}
|
||||
|
||||
await props.api?.update_device_instance_state(deviceId.value, selectedInstanceId.value.uuid, disabled);
|
||||
if (disabled || !disabledNetworkConfig.value) {
|
||||
await props.api?.update_device_instance_state(deviceId.value, selectedInstanceId.value.uuid, disabled);
|
||||
} else if (disabledNetworkConfig.value) {
|
||||
await props.api?.delete_network(deviceId.value, disabledNetworkConfig.value.instance_id);
|
||||
await props.api?.run_network(deviceId.value, disabledNetworkConfig.value);
|
||||
}
|
||||
await loadNetworkInstanceIds();
|
||||
}
|
||||
|
||||
@@ -211,62 +217,97 @@ const loadDeviceInfo = async () => {
|
||||
}
|
||||
|
||||
const exportConfig = async () => {
|
||||
if (!deviceId.value || !instanceId.value) {
|
||||
toast.add({ severity: 'error', summary: 'Error', detail: 'No network instance selected', life: 2000 });
|
||||
return;
|
||||
}
|
||||
if (!deviceId.value || !instanceId.value) {
|
||||
toast.add({ severity: 'error', summary: 'Error', detail: 'No network instance selected', life: 2000 });
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
let ret = await props.api?.get_network_config(deviceId.value, instanceId.value);
|
||||
delete ret.instance_id;
|
||||
exportJsonFile(JSON.stringify(ret, null, 2),instanceId.value +'.json');
|
||||
} catch (e: any) {
|
||||
console.error(e);
|
||||
toast.add({ severity: 'error', summary: 'Error', detail: 'Failed to export network config, error: ' + JSON.stringify(e.response.data), life: 2000 });
|
||||
return;
|
||||
}
|
||||
try {
|
||||
let networkConfig = await props.api?.get_network_config(deviceId.value, instanceId.value);
|
||||
delete networkConfig.instance_id;
|
||||
let { toml_config: tomlConfig, error } = await props.api?.generate_config({
|
||||
config: networkConfig
|
||||
});
|
||||
if (error) {
|
||||
throw { response: { data: error } };
|
||||
}
|
||||
exportTomlFile(tomlConfig ?? '', instanceId.value + '.toml');
|
||||
} catch (e: any) {
|
||||
console.error(e);
|
||||
toast.add({ severity: 'error', summary: 'Error', detail: 'Failed to export network config, error: ' + JSON.stringify(e.response.data), life: 2000 });
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const importConfig = () => {
|
||||
configFile.value.click();
|
||||
configFile.value.click();
|
||||
}
|
||||
|
||||
const handleFileUpload = (event: Event) => {
|
||||
const files = (event.target as HTMLInputElement).files;
|
||||
const file = files ? files[0] : null;
|
||||
if (file) {
|
||||
const files = (event.target as HTMLInputElement).files;
|
||||
const file = files ? files[0] : null;
|
||||
if (!file) return;
|
||||
const reader = new FileReader();
|
||||
reader.onload = (e) => {
|
||||
try {
|
||||
let str = e.target?.result?.toString();
|
||||
if(str){
|
||||
const config = JSON.parse(str);
|
||||
if(config === null || typeof config !== "object"){
|
||||
throw new Error();
|
||||
}
|
||||
Object.assign(newNetworkConfig.value, config);
|
||||
toast.add({ severity: 'success', summary: 'Import Success', detail: "Config file import success", life: 2000 });
|
||||
reader.onload = async (e) => {
|
||||
try {
|
||||
let tomlConfig = e.target?.result?.toString();
|
||||
if (!tomlConfig) return;
|
||||
const resp = await props.api?.parse_config({ toml_config: tomlConfig });
|
||||
if (resp.error) {
|
||||
throw resp.error;
|
||||
}
|
||||
|
||||
const config = resp.config;
|
||||
if (!config) return;
|
||||
|
||||
config.instance_id = newNetworkConfig.value?.instance_id ?? config?.instance_id;
|
||||
|
||||
Object.assign(newNetworkConfig.value, resp.config);
|
||||
toast.add({ severity: 'success', summary: 'Import Success', detail: "Config file import success", life: 2000 });
|
||||
} catch (error) {
|
||||
toast.add({ severity: 'error', summary: 'Error', detail: 'Config file parse error: ' + error, life: 2000 });
|
||||
}
|
||||
} catch (error) {
|
||||
toast.add({ severity: 'error', summary: 'Error', detail: 'Config file parse error.', life: 2000 });
|
||||
}
|
||||
configFile.value.value = null;
|
||||
configFile.value.value = null;
|
||||
}
|
||||
reader.readAsText(file);
|
||||
}
|
||||
}
|
||||
|
||||
const exportJsonFile = (context: string, name: string) => {
|
||||
let url = window.URL.createObjectURL(new Blob([context], { type: 'application/json' }));
|
||||
let link = document.createElement('a');
|
||||
link.style.display = 'none';
|
||||
link.href = url;
|
||||
link.setAttribute('download', name);
|
||||
document.body.appendChild(link);
|
||||
link.click();
|
||||
const exportTomlFile = (context: string, name: string) => {
|
||||
let url = window.URL.createObjectURL(new Blob([context], { type: 'application/toml' }));
|
||||
let link = document.createElement('a');
|
||||
link.style.display = 'none';
|
||||
link.href = url;
|
||||
link.setAttribute('download', name);
|
||||
document.body.appendChild(link);
|
||||
link.click();
|
||||
|
||||
document.body.removeChild(link);
|
||||
window.URL.revokeObjectURL(url);
|
||||
document.body.removeChild(link);
|
||||
window.URL.revokeObjectURL(url);
|
||||
}
|
||||
|
||||
const generateConfig = async (config: NetworkTypes.NetworkConfig): Promise<string> => {
|
||||
let { toml_config: tomlConfig, error } = await props.api?.generate_config({ config });
|
||||
if (error) {
|
||||
throw error;
|
||||
}
|
||||
return tomlConfig ?? '';
|
||||
}
|
||||
|
||||
const saveConfig = async (tomlConfig: string): Promise<void> => {
|
||||
let resp = await props.api?.parse_config({ toml_config: tomlConfig });
|
||||
if (resp.error) {
|
||||
throw resp.error;
|
||||
};
|
||||
const config = resp.config;
|
||||
if (!config) {
|
||||
throw new Error("Parsed config is empty");
|
||||
}
|
||||
config.instance_id = disabledNetworkConfig.value?.instance_id ?? config?.instance_id;
|
||||
if (networkIsDisabled.value) {
|
||||
disabledNetworkConfig.value = config;
|
||||
} else {
|
||||
newNetworkConfig.value = config;
|
||||
}
|
||||
}
|
||||
|
||||
let periodFunc = new Utils.PeriodicTask(async () => {
|
||||
@@ -288,18 +329,23 @@ onUnmounted(() => {
|
||||
</script>
|
||||
|
||||
<template>
|
||||
<input type="file" @change="handleFileUpload" class="hidden" accept="application/json" ref="configFile"/>
|
||||
<input type="file" @change="handleFileUpload" class="hidden" accept="application/toml" ref="configFile" />
|
||||
<ConfirmPopup></ConfirmPopup>
|
||||
<Dialog v-model:visible="showCreateNetworkDialog" modal :header="!isEditing ? 'Create New Network' : 'Edit Network'"
|
||||
:style="{ width: '55rem' }">
|
||||
<Dialog v-if="!networkIsDisabled" v-model:visible="showCreateNetworkDialog" modal
|
||||
:header="!isEditing ? 'Create New Network' : 'Edit Network'" :style="{ width: '55rem' }">
|
||||
<div class="flex flex-col">
|
||||
<div class="w-11/12 self-center ">
|
||||
<Button @click="importConfig" icon="pi pi-file-import" label="Import" iconPos="right" />
|
||||
<Divider />
|
||||
</div>
|
||||
<div class="w-11/12 self-center space-x-2">
|
||||
<Button @click="showConfigEditDialog = true" icon="pi pi-pen-to-square" label="Edit File" iconPos="right" />
|
||||
<Button @click="importConfig" icon="pi pi-file-import" label="Import" iconPos="right" />
|
||||
</div>
|
||||
</div>
|
||||
<Divider />
|
||||
<Config :cur-network="newNetworkConfig" @run-network="createNewNetwork"></Config>
|
||||
</Dialog>
|
||||
<ConfigEditDialog v-if="networkIsDisabled" v-model:visible="showCreateNetworkDialog"
|
||||
:cur-network="disabledNetworkConfig" :generate-config="generateConfig" :save-config="saveConfig" />
|
||||
<ConfigEditDialog v-else v-model:visible="showConfigEditDialog" :cur-network="newNetworkConfig"
|
||||
:generate-config="generateConfig" :save-config="saveConfig" />
|
||||
|
||||
<Toolbar>
|
||||
<template #start>
|
||||
@@ -329,7 +375,7 @@ onUnmounted(() => {
|
||||
</Status>
|
||||
<Divider />
|
||||
<div class="text-center">
|
||||
<Button @click="updateNetworkState(true)" label="Disable Network" severity="warn" />
|
||||
<Button @click="updateNetworkState(true)" label="Disable Network" severity="warn" />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -1,10 +1,17 @@
|
||||
const defaultApiHost = 'https://config-server.easytier.cn';
|
||||
|
||||
interface ApiHost {
|
||||
value: string;
|
||||
usedAt: number;
|
||||
}
|
||||
|
||||
let apiMeta: {
|
||||
api_host: string;
|
||||
} | undefined = (window as any).apiMeta;
|
||||
|
||||
// remove trailing slashes from the URL
|
||||
const cleanUrl = (url: string) => url.replace(/\/+$/, '');
|
||||
|
||||
const defaultApiHost = cleanUrl(apiMeta?.api_host ?? `${location.origin}${location.pathname}`);
|
||||
|
||||
const isValidHttpUrl = (s: string): boolean => {
|
||||
let url;
|
||||
|
||||
@@ -45,7 +52,7 @@ const saveApiHost = (host: string) => {
|
||||
}
|
||||
|
||||
let hosts = cleanAndLoadApiHosts();
|
||||
const newHost: ApiHost = {value: host, usedAt: Date.now()};
|
||||
const newHost: ApiHost = { value: host, usedAt: Date.now() };
|
||||
hosts = hosts.filter((h) => h.value !== host);
|
||||
hosts.push(newHost);
|
||||
localStorage.setItem('apiHosts', JSON.stringify(hosts));
|
||||
@@ -61,4 +68,4 @@ const getInitialApiHost = (): string => {
|
||||
}
|
||||
};
|
||||
|
||||
export {getInitialApiHost, cleanAndLoadApiHosts, saveApiHost}
|
||||
export { getInitialApiHost, cleanAndLoadApiHosts, saveApiHost }
|
||||
@@ -3,9 +3,20 @@ import vue from '@vitejs/plugin-vue'
|
||||
// import { viteSingleFile } from "vite-plugin-singlefile"
|
||||
|
||||
const WEB_BASE_URL = process.env.WEB_BASE_URL || '';
|
||||
const API_BASE_URL = process.env.API_BASE_URL || 'http://localhost:11211';
|
||||
|
||||
// https://vite.dev/config/
|
||||
export default defineConfig({
|
||||
base: WEB_BASE_URL,
|
||||
plugins: [vue(),/* viteSingleFile() */],
|
||||
server: {
|
||||
proxy: {
|
||||
"/api": {
|
||||
target: API_BASE_URL,
|
||||
},
|
||||
"/api_meta.js": {
|
||||
target: API_BASE_URL,
|
||||
},
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -27,4 +27,7 @@ cli:
|
||||
zh-CN: "web dashboard 服务器的监听端口, 默认为与 api 服务器端口相同"
|
||||
no_web:
|
||||
en: "Do not run the web dashboard server"
|
||||
zh-CN: "不运行 web dashboard 服务器"
|
||||
zh-CN: "不运行 web dashboard 服务器"
|
||||
api_host:
|
||||
en: "The URL of the API server, used by the web frontend to connect to"
|
||||
zh-CN: "API 服务器的 URL,用于 web 前端连接"
|
||||
@@ -1,21 +1,24 @@
|
||||
pub mod session;
|
||||
pub mod storage;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::{
|
||||
atomic::{AtomicU32, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
use dashmap::DashMap;
|
||||
use easytier::{
|
||||
common::scoped_task::ScopedTask, proto::web::HeartbeatRequest, tunnel::TunnelListener,
|
||||
};
|
||||
use easytier::{proto::web::HeartbeatRequest, tunnel::TunnelListener};
|
||||
use session::Session;
|
||||
use storage::{Storage, StorageToken};
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
use crate::db::{Db, UserIdInDb};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ClientManager {
|
||||
accept_task: Option<ScopedTask<()>>,
|
||||
clear_task: Option<ScopedTask<()>>,
|
||||
tasks: JoinSet<()>,
|
||||
|
||||
listeners_cnt: Arc<AtomicU32>,
|
||||
|
||||
client_sessions: Arc<DashMap<url::Url, Arc<Session>>>,
|
||||
storage: Storage,
|
||||
@@ -23,24 +26,35 @@ pub struct ClientManager {
|
||||
|
||||
impl ClientManager {
|
||||
pub fn new(db: Db) -> Self {
|
||||
let client_sessions = Arc::new(DashMap::new());
|
||||
let sessions: Arc<DashMap<url::Url, Arc<Session>>> = client_sessions.clone();
|
||||
let mut tasks = JoinSet::new();
|
||||
tasks.spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(std::time::Duration::from_secs(15)).await;
|
||||
sessions.retain(|_, session| session.is_running());
|
||||
}
|
||||
});
|
||||
ClientManager {
|
||||
accept_task: None,
|
||||
clear_task: None,
|
||||
tasks,
|
||||
|
||||
client_sessions: Arc::new(DashMap::new()),
|
||||
listeners_cnt: Arc::new(AtomicU32::new(0)),
|
||||
|
||||
client_sessions,
|
||||
storage: Storage::new(db),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn serve<L: TunnelListener + 'static>(
|
||||
pub async fn add_listener<L: TunnelListener + 'static>(
|
||||
&mut self,
|
||||
mut listener: L,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
listener.listen().await?;
|
||||
|
||||
self.listeners_cnt.fetch_add(1, Ordering::Relaxed);
|
||||
let sessions = self.client_sessions.clone();
|
||||
let storage = self.storage.weak_ref();
|
||||
let task = tokio::spawn(async move {
|
||||
let listeners_cnt = self.listeners_cnt.clone();
|
||||
self.tasks.spawn(async move {
|
||||
while let Ok(tunnel) = listener.accept().await {
|
||||
let info = tunnel.info().unwrap();
|
||||
let client_url: url::Url = info.remote_addr.unwrap().into();
|
||||
@@ -49,24 +63,14 @@ impl ClientManager {
|
||||
session.serve(tunnel).await;
|
||||
sessions.insert(client_url, Arc::new(session));
|
||||
}
|
||||
listeners_cnt.fetch_sub(1, Ordering::Relaxed);
|
||||
});
|
||||
|
||||
self.accept_task = Some(ScopedTask::from(task));
|
||||
|
||||
let sessions = self.client_sessions.clone();
|
||||
let task = tokio::spawn(async move {
|
||||
loop {
|
||||
tokio::time::sleep(std::time::Duration::from_secs(15)).await;
|
||||
sessions.retain(|_, session| session.is_running());
|
||||
}
|
||||
});
|
||||
self.clear_task = Some(ScopedTask::from(task));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn is_running(&self) -> bool {
|
||||
self.accept_task.is_some() && self.clear_task.is_some()
|
||||
self.listeners_cnt.load(Ordering::Relaxed) > 0
|
||||
}
|
||||
|
||||
pub async fn list_sessions(&self) -> Vec<StorageToken> {
|
||||
@@ -132,7 +136,7 @@ mod tests {
|
||||
async fn test_client() {
|
||||
let listener = UdpTunnelListener::new("udp://0.0.0.0:54333".parse().unwrap());
|
||||
let mut mgr = ClientManager::new(Db::memory_db().await);
|
||||
mgr.serve(Box::new(listener)).await.unwrap();
|
||||
mgr.add_listener(Box::new(listener)).await.unwrap();
|
||||
|
||||
mgr.db()
|
||||
.inner()
|
||||
|
||||
@@ -8,11 +8,14 @@ use std::sync::Arc;
|
||||
use clap::Parser;
|
||||
use easytier::{
|
||||
common::{
|
||||
config::{ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, TomlConfigLoader},
|
||||
config::{ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader},
|
||||
constants::EASYTIER_VERSION,
|
||||
error::Error,
|
||||
network::{local_ipv4, local_ipv6},
|
||||
},
|
||||
tunnel::{
|
||||
tcp::TcpTunnelListener, udp::UdpTunnelListener, websocket::WSTunnelListener, TunnelListener,
|
||||
},
|
||||
tunnel::{tcp::TcpTunnelListener, udp::UdpTunnelListener, TunnelListener},
|
||||
utils::{init_logger, setup_panic_handler},
|
||||
};
|
||||
|
||||
@@ -27,7 +30,7 @@ mod web;
|
||||
rust_i18n::i18n!("locales", fallback = "en");
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = "easytier-core", author, version = EASYTIER_VERSION , about, long_about = None)]
|
||||
#[command(name = "easytier-web", author, version = EASYTIER_VERSION , about, long_about = None)]
|
||||
struct Cli {
|
||||
#[arg(short, long, default_value = "et.db", help = t!("cli.db").to_string())]
|
||||
db: String,
|
||||
@@ -89,18 +92,67 @@ struct Cli {
|
||||
default_value = "false"
|
||||
)]
|
||||
no_web: bool,
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
#[arg(
|
||||
long,
|
||||
help = t!("cli.api_host").to_string()
|
||||
)]
|
||||
api_host: Option<url::Url>,
|
||||
}
|
||||
|
||||
impl LoggingConfigLoader for &Cli {
|
||||
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
|
||||
ConsoleLoggerConfig {
|
||||
level: self.console_log_level.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_file_logger_config(&self) -> FileLoggerConfig {
|
||||
FileLoggerConfig {
|
||||
dir: self.file_log_dir.clone(),
|
||||
level: self.file_log_level.clone(),
|
||||
file: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_listener_by_url(l: &url::Url) -> Result<Box<dyn TunnelListener>, Error> {
|
||||
Ok(match l.scheme() {
|
||||
"tcp" => Box::new(TcpTunnelListener::new(l.clone())),
|
||||
"udp" => Box::new(UdpTunnelListener::new(l.clone())),
|
||||
"ws" => Box::new(WSTunnelListener::new(l.clone())),
|
||||
_ => {
|
||||
return Err(Error::InvalidUrl(l.to_string()));
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async fn get_dual_stack_listener(
|
||||
protocol: &str,
|
||||
port: u16,
|
||||
) -> Result<
|
||||
(
|
||||
Option<Box<dyn TunnelListener>>,
|
||||
Option<Box<dyn TunnelListener>>,
|
||||
),
|
||||
Error,
|
||||
> {
|
||||
let is_protocol_support_dual_stack =
|
||||
protocol.trim().to_lowercase() == "tcp" || protocol.trim().to_lowercase() == "udp";
|
||||
let v6_listener = if is_protocol_support_dual_stack && local_ipv6().await.is_ok() {
|
||||
get_listener_by_url(&format!("{}://[::0]:{}", protocol, port).parse().unwrap()).ok()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let v4_listener = if let Ok(_) = local_ipv4().await {
|
||||
get_listener_by_url(&format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap()).ok()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
Ok((v6_listener, v4_listener))
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let locale = sys_locale::get_locale().unwrap_or_else(|| String::from("en-US"));
|
||||
@@ -108,64 +160,71 @@ async fn main() {
|
||||
setup_panic_handler();
|
||||
|
||||
let cli = Cli::parse();
|
||||
let config = TomlConfigLoader::default();
|
||||
config.set_console_logger_config(ConsoleLoggerConfig {
|
||||
level: cli.console_log_level,
|
||||
});
|
||||
config.set_file_logger_config(FileLoggerConfig {
|
||||
dir: cli.file_log_dir,
|
||||
level: cli.file_log_level,
|
||||
file: None,
|
||||
});
|
||||
init_logger(config, false).unwrap();
|
||||
init_logger(&cli, false).unwrap();
|
||||
|
||||
// let db = db::Db::new(":memory:").await.unwrap();
|
||||
let db = db::Db::new(cli.db).await.unwrap();
|
||||
|
||||
let listener = get_listener_by_url(
|
||||
&format!(
|
||||
"{}://0.0.0.0:{}",
|
||||
cli.config_server_protocol, cli.config_server_port
|
||||
)
|
||||
.parse()
|
||||
.unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let mut mgr = client_manager::ClientManager::new(db.clone());
|
||||
mgr.serve(listener).await.unwrap();
|
||||
let (v6_listener, v4_listener) =
|
||||
get_dual_stack_listener(&cli.config_server_protocol, cli.config_server_port)
|
||||
.await
|
||||
.unwrap();
|
||||
if v4_listener.is_none() && v6_listener.is_none() {
|
||||
panic!("Listen to both IPv4 and IPv6 failed");
|
||||
}
|
||||
if let Some(listener) = v6_listener {
|
||||
mgr.add_listener(listener).await.unwrap();
|
||||
}
|
||||
if let Some(listener) = v4_listener {
|
||||
mgr.add_listener(listener).await.unwrap();
|
||||
}
|
||||
|
||||
let mgr = Arc::new(mgr);
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
let restful_also_serve_web = !cli.no_web
|
||||
&& (cli.web_server_port.is_none() || cli.web_server_port == Some(cli.api_server_port));
|
||||
|
||||
let (web_router_restful, web_router_static) = if cli.no_web {
|
||||
(None, None)
|
||||
} else {
|
||||
let web_router = web::build_router(cli.api_host.clone());
|
||||
if cli.web_server_port.is_none() || cli.web_server_port == Some(cli.api_server_port) {
|
||||
(Some(web_router), None)
|
||||
} else {
|
||||
(None, Some(web_router))
|
||||
}
|
||||
};
|
||||
#[cfg(not(feature = "embed"))]
|
||||
let restful_also_serve_web = false;
|
||||
let web_router_restful = None;
|
||||
|
||||
let mut restful_server = restful::RestfulServer::new(
|
||||
let _restful_server_tasks = restful::RestfulServer::new(
|
||||
format!("0.0.0.0:{}", cli.api_server_port).parse().unwrap(),
|
||||
mgr.clone(),
|
||||
db,
|
||||
restful_also_serve_web,
|
||||
web_router_restful,
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.start()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
restful_server.start().await.unwrap();
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
let mut web_server = web::WebServer::new(
|
||||
format!("0.0.0.0:{}", cli.web_server_port.unwrap_or(0))
|
||||
.parse()
|
||||
let _web_server_task = if let Some(web_router) = web_router_static {
|
||||
Some(
|
||||
web::WebServer::new(
|
||||
format!("0.0.0.0:{}", cli.web_server_port.unwrap_or(0))
|
||||
.parse()
|
||||
.unwrap(),
|
||||
web_router,
|
||||
)
|
||||
.await
|
||||
.unwrap()
|
||||
.start()
|
||||
.await
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
if !cli.no_web && !restful_also_serve_web {
|
||||
web_server.start().await.unwrap();
|
||||
}
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ use axum::{extract::State, routing::get, Json, Router};
|
||||
use axum_login::tower_sessions::{ExpiredDeletion, SessionManagerLayer};
|
||||
use axum_login::{login_required, AuthManagerLayerBuilder, AuthUser, AuthzBackend};
|
||||
use axum_messages::MessagesManagerLayer;
|
||||
use easytier::common::config::ConfigLoader;
|
||||
use easytier::common::config::{ConfigLoader, TomlConfigLoader};
|
||||
use easytier::common::scoped_task::ScopedTask;
|
||||
use easytier::launcher::NetworkConfig;
|
||||
use easytier::proto::rpc_types;
|
||||
@@ -39,12 +39,11 @@ pub struct RestfulServer {
|
||||
client_mgr: Arc<ClientManager>,
|
||||
db: Db,
|
||||
|
||||
serve_task: Option<ScopedTask<()>>,
|
||||
delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>,
|
||||
|
||||
// serve_task: Option<ScopedTask<()>>,
|
||||
// delete_task: Option<ScopedTask<tower_sessions::session_store::Result<()>>>,
|
||||
network_api: NetworkApi,
|
||||
|
||||
enable_web_embed: bool,
|
||||
web_router: Option<Router>,
|
||||
}
|
||||
|
||||
type AppStateInner = Arc<ClientManager>;
|
||||
@@ -69,6 +68,17 @@ struct GenerateConfigResponse {
|
||||
toml_config: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize, serde::Serialize)]
|
||||
struct ParseConfigRequest {
|
||||
toml_config: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize, serde::Serialize)]
|
||||
struct ParseConfigResponse {
|
||||
error: Option<String>,
|
||||
config: Option<NetworkConfig>,
|
||||
}
|
||||
|
||||
#[derive(Debug, serde::Deserialize, serde::Serialize)]
|
||||
pub struct Error {
|
||||
message: String,
|
||||
@@ -94,7 +104,7 @@ impl RestfulServer {
|
||||
bind_addr: SocketAddr,
|
||||
client_mgr: Arc<ClientManager>,
|
||||
db: Db,
|
||||
enable_web_embed: bool,
|
||||
web_router: Option<Router>,
|
||||
) -> anyhow::Result<Self> {
|
||||
assert!(client_mgr.is_running());
|
||||
|
||||
@@ -104,10 +114,10 @@ impl RestfulServer {
|
||||
bind_addr,
|
||||
client_mgr,
|
||||
db,
|
||||
serve_task: None,
|
||||
delete_task: None,
|
||||
// serve_task: None,
|
||||
// delete_task: None,
|
||||
network_api,
|
||||
enable_web_embed,
|
||||
web_router,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -159,7 +169,34 @@ impl RestfulServer {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(&mut self) -> Result<(), anyhow::Error> {
|
||||
async fn handle_parse_config(
|
||||
Json(req): Json<ParseConfigRequest>,
|
||||
) -> Result<Json<ParseConfigResponse>, HttpHandleError> {
|
||||
let config = TomlConfigLoader::new_from_str(&req.toml_config)
|
||||
.and_then(|config| NetworkConfig::new_from_config(&config));
|
||||
match config {
|
||||
Ok(c) => Ok(ParseConfigResponse {
|
||||
error: None,
|
||||
config: Some(c),
|
||||
}
|
||||
.into()),
|
||||
Err(e) => Ok(ParseConfigResponse {
|
||||
error: Some(format!("{:?}", e)),
|
||||
config: None,
|
||||
}
|
||||
.into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(
|
||||
mut self,
|
||||
) -> Result<
|
||||
(
|
||||
ScopedTask<()>,
|
||||
ScopedTask<tower_sessions::session_store::Result<()>>,
|
||||
),
|
||||
anyhow::Error,
|
||||
> {
|
||||
let listener = TcpListener::bind(self.bind_addr).await?;
|
||||
|
||||
// Session layer.
|
||||
@@ -169,14 +206,13 @@ impl RestfulServer {
|
||||
let session_store = SqliteStore::new(self.db.inner());
|
||||
session_store.migrate().await?;
|
||||
|
||||
self.delete_task.replace(
|
||||
let delete_task: ScopedTask<tower_sessions::session_store::Result<()>> =
|
||||
tokio::task::spawn(
|
||||
session_store
|
||||
.clone()
|
||||
.continuously_delete_expired(tokio::time::Duration::from_secs(60)),
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
.into();
|
||||
|
||||
// Generate a cryptographic key to sign the session cookie.
|
||||
let key = Key::generate();
|
||||
@@ -210,25 +246,24 @@ impl RestfulServer {
|
||||
"/api/v1/generate-config",
|
||||
post(Self::handle_generate_config),
|
||||
)
|
||||
.route("/api/v1/parse-config", post(Self::handle_parse_config))
|
||||
.layer(MessagesManagerLayer)
|
||||
.layer(auth_layer)
|
||||
.layer(tower_http::cors::CorsLayer::very_permissive())
|
||||
.layer(compression_layer);
|
||||
|
||||
#[cfg(feature = "embed")]
|
||||
let app = if self.enable_web_embed {
|
||||
use axum_embed::ServeEmbed;
|
||||
let service = ServeEmbed::<Assets>::new();
|
||||
app.fallback_service(service)
|
||||
let app = if let Some(web_router) = self.web_router.take() {
|
||||
app.merge(web_router)
|
||||
} else {
|
||||
app
|
||||
};
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
let serve_task: ScopedTask<()> = tokio::spawn(async move {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
});
|
||||
self.serve_task = Some(task.into());
|
||||
})
|
||||
.into();
|
||||
|
||||
Ok(())
|
||||
Ok((serve_task, delete_task))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,13 @@
|
||||
use axum::Router;
|
||||
use axum::{
|
||||
extract::State,
|
||||
http::header,
|
||||
response::{IntoResponse, Response},
|
||||
routing, Router,
|
||||
};
|
||||
use axum_embed::ServeEmbed;
|
||||
use easytier::common::scoped_task::ScopedTask;
|
||||
use rust_embed::RustEmbed;
|
||||
use std::net::SocketAddr;
|
||||
use axum_embed::ServeEmbed;
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
/// Embed assets for web dashboard, build frontend first
|
||||
@@ -10,30 +15,72 @@ use tokio::net::TcpListener;
|
||||
#[folder = "frontend/dist/"]
|
||||
struct Assets;
|
||||
|
||||
#[derive(Debug, serde::Deserialize, serde::Serialize)]
|
||||
struct ApiMetaResponse {
|
||||
api_host: String,
|
||||
}
|
||||
|
||||
async fn handle_api_meta(State(api_host): State<url::Url>) -> impl IntoResponse {
|
||||
Response::builder()
|
||||
.header(
|
||||
header::CONTENT_TYPE,
|
||||
"application/javascript; charset=utf-8",
|
||||
)
|
||||
.header(header::CACHE_CONTROL, "no-cache, no-store, must-revalidate")
|
||||
.header(header::PRAGMA, "no-cache")
|
||||
.header(header::EXPIRES, "0")
|
||||
.body(format!(
|
||||
"window.apiMeta = {}",
|
||||
serde_json::to_string(&ApiMetaResponse {
|
||||
api_host: api_host.to_string()
|
||||
})
|
||||
.unwrap(),
|
||||
))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn build_router(api_host: Option<url::Url>) -> Router {
|
||||
let service = ServeEmbed::<Assets>::new();
|
||||
let router = Router::new();
|
||||
|
||||
let router = if let Some(api_host) = api_host {
|
||||
let sub_router = Router::new()
|
||||
.route("/api_meta.js", routing::get(handle_api_meta))
|
||||
.with_state(api_host);
|
||||
router.merge(sub_router)
|
||||
} else {
|
||||
router
|
||||
};
|
||||
|
||||
let router = router.fallback_service(service);
|
||||
|
||||
router
|
||||
}
|
||||
|
||||
pub struct WebServer {
|
||||
bind_addr: SocketAddr,
|
||||
router: Router,
|
||||
serve_task: Option<ScopedTask<()>>,
|
||||
}
|
||||
|
||||
impl WebServer {
|
||||
pub async fn new(bind_addr: SocketAddr) -> anyhow::Result<Self> {
|
||||
pub async fn new(bind_addr: SocketAddr, router: Router) -> anyhow::Result<Self> {
|
||||
Ok(WebServer {
|
||||
bind_addr,
|
||||
router,
|
||||
serve_task: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start(&mut self) -> Result<(), anyhow::Error> {
|
||||
pub async fn start(self) -> Result<ScopedTask<()>, anyhow::Error> {
|
||||
let listener = TcpListener::bind(self.bind_addr).await?;
|
||||
let service = ServeEmbed::<Assets>::new();
|
||||
let app = Router::new().fallback_service(service);
|
||||
let app = self.router;
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
});
|
||||
})
|
||||
.into();
|
||||
|
||||
self.serve_task = Some(task.into());
|
||||
|
||||
Ok(())
|
||||
Ok(task)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ name = "easytier"
|
||||
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
|
||||
homepage = "https://github.com/EasyTier/EasyTier"
|
||||
repository = "https://github.com/EasyTier/EasyTier"
|
||||
version = "2.3.0"
|
||||
version = "2.3.2"
|
||||
edition = "2021"
|
||||
authors = ["kkrainbow"]
|
||||
keywords = ["vpn", "p2p", "network", "easytier"]
|
||||
@@ -64,7 +64,8 @@ bytes = "1.5.0"
|
||||
pin-project-lite = "0.2.13"
|
||||
tachyonix = "0.3.0"
|
||||
|
||||
quinn = { version = "0.11.0", optional = true, features = ["ring"] }
|
||||
quinn = { version = "0.11.8", optional = true, features = ["ring"] }
|
||||
|
||||
rustls = { version = "0.23.0", features = [
|
||||
"ring",
|
||||
], default-features = false, optional = true }
|
||||
@@ -138,6 +139,7 @@ network-interface = "2.0"
|
||||
|
||||
# for ospf route
|
||||
petgraph = "0.8.1"
|
||||
hashbrown = "0.15.3"
|
||||
|
||||
# for wireguard
|
||||
boringtun = { package = "boringtun-easytier", version = "0.6.1", optional = true }
|
||||
@@ -153,7 +155,7 @@ humansize = "2.1.3"
|
||||
|
||||
base64 = "0.22"
|
||||
|
||||
mimalloc-rust = { git = "https://github.com/EasyTier/mimalloc-rust", optional = true }
|
||||
mimalloc = { version = "*", optional = true }
|
||||
|
||||
# mips
|
||||
atomic-shim = "0.2.0"
|
||||
@@ -185,10 +187,7 @@ async-ringbuf = "0.3.1"
|
||||
|
||||
service-manager = { git = "https://github.com/chipsenkbeil/service-manager-rs.git", branch = "main" }
|
||||
|
||||
async-compression = { version = "0.4.17", default-features = false, features = [
|
||||
"zstd",
|
||||
"tokio",
|
||||
] }
|
||||
zstd = { version = "0.13" }
|
||||
|
||||
kcp-sys = { git = "https://github.com/EasyTier/kcp-sys" }
|
||||
|
||||
@@ -260,7 +259,7 @@ prost-reflect-build = { version = "0.14.0" }
|
||||
|
||||
[target.'cfg(windows)'.build-dependencies]
|
||||
reqwest = { version = "0.12.12", features = ["blocking"] }
|
||||
zip = "0.6.6"
|
||||
zip = "4.0.0"
|
||||
|
||||
# enable thunk-rs when compiling for x86_64 or i686 windows
|
||||
[target.x86_64-pc-windows-msvc.build-dependencies]
|
||||
@@ -272,7 +271,7 @@ thunk-rs = { git = "https://github.com/easytier/thunk.git", default-features = f
|
||||
|
||||
[dev-dependencies]
|
||||
serial_test = "3.0.0"
|
||||
rstest = "0.18.2"
|
||||
rstest = "0.25.0"
|
||||
futures-util = "0.3.30"
|
||||
maplit = "1.0.2"
|
||||
|
||||
@@ -282,9 +281,8 @@ tokio-socks = "0.5.2"
|
||||
|
||||
|
||||
[features]
|
||||
default = ["wireguard", "mimalloc", "websocket", "smoltcp", "tun", "socks5"]
|
||||
default = ["wireguard", "mimalloc", "websocket", "smoltcp", "tun", "socks5", "quic"]
|
||||
full = [
|
||||
"quic",
|
||||
"websocket",
|
||||
"wireguard",
|
||||
"mimalloc",
|
||||
@@ -293,10 +291,9 @@ full = [
|
||||
"tun",
|
||||
"socks5",
|
||||
]
|
||||
mips = ["aes-gcm", "mimalloc", "wireguard", "tun", "smoltcp", "socks5"]
|
||||
wireguard = ["dep:boringtun", "dep:ring"]
|
||||
quic = ["dep:quinn", "dep:rustls", "dep:rcgen"]
|
||||
mimalloc = ["dep:mimalloc-rust"]
|
||||
mimalloc = ["dep:mimalloc"]
|
||||
aes-gcm = ["dep:aes-gcm"]
|
||||
tun = ["dep:tun"]
|
||||
websocket = [
|
||||
|
||||
@@ -10,6 +10,11 @@ core_clap:
|
||||
配置服务器地址。允许格式:
|
||||
完整URL:--config-server udp://127.0.0.1:22020/admin
|
||||
仅用户名:--config-server admin,将使用官方的服务器
|
||||
machine_id:
|
||||
en: |+
|
||||
the machine id to identify this machine, used for config recovery after disconnection, must be unique and fixed. default is from system.
|
||||
zh-CN: |+
|
||||
Web 配置服务器通过 machine id 来识别机器,用于断线重连后的配置恢复,需要保证唯一且固定不变。默认从系统获得。
|
||||
config_file:
|
||||
en: "path to the config file, NOTE: the options set by cmdline args will override options in config file"
|
||||
zh-CN: "配置文件路径,注意:命令行中的配置的选项会覆盖配置文件中的选项"
|
||||
@@ -32,11 +37,20 @@ core_clap:
|
||||
en: "use a public shared node to discover peers"
|
||||
zh-CN: "使用公共共享节点来发现对等节点"
|
||||
proxy_networks:
|
||||
en: "export local networks to other peers in the vpn"
|
||||
zh-CN: "将本地网络导出到VPN中的其他对等节点"
|
||||
en: |+
|
||||
export local networks to other peers in the vpn, e.g.: 10.0.0.0/24.
|
||||
also support mapping proxy network to other cidr, e.g.: 10.0.0.0/24->192.168.0.0/24
|
||||
other peers can access 10.0.0.1 with ip 192.168.0.1
|
||||
zh-CN: |+
|
||||
将本地网络导出到VPN中的其他对等节点,例如:10.0.0.0/24。
|
||||
还支持将代理网络映射到其他CIDR,例如:10.0.0.0/24->192.168.0.0/24
|
||||
其他对等节点可以通过 IP 192.168.0.1 来访问 10.0.0.1
|
||||
rpc_portal:
|
||||
en: "rpc portal address to listen for management. 0 means random port, 12345 means listen on 12345 of localhost, 0.0.0.0:12345 means listen on 12345 of all interfaces. default is 0 and will try 15888 first"
|
||||
zh-CN: "用于管理的RPC门户地址。0表示随机端口,12345表示在localhost的12345上监听,0.0.0.0:12345表示在所有接口的12345上监听。默认是0,首先尝试15888"
|
||||
rpc_portal_whitelist:
|
||||
en: "rpc portal whitelist, only allow these addresses to access rpc portal, e.g.: 127.0.0.1,127.0.0.0/8,::1/128"
|
||||
zh-CN: "RPC门户白名单,仅允许这些地址访问RPC门户,例如:127.0.0.1/32,127.0.0.0/8,::1/128"
|
||||
listeners:
|
||||
en: |+
|
||||
listeners to accept connections, allow format:
|
||||
@@ -149,12 +163,21 @@ core_clap:
|
||||
disable_kcp_input:
|
||||
en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved."
|
||||
zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。"
|
||||
enable_quic_proxy:
|
||||
en: "proxy tcp streams with QUIC, improving the latency and throughput on the network with udp packet loss."
|
||||
zh-CN: "使用 QUIC 代理 TCP 流,提高在 UDP 丢包网络上的延迟和吞吐量。"
|
||||
disable_quic_input:
|
||||
en: "do not allow other nodes to use QUIC to proxy tcp streams to this node. when a node with QUIC proxy enabled accesses this node, the original tcp connection is preserved."
|
||||
zh-CN: "不允许其他节点使用 QUIC 代理 TCP 流到此节点。开启 QUIC 代理的节点访问此节点时,依然使用原始 TCP 连接。"
|
||||
port_forward:
|
||||
en: "forward local port to remote port in virtual network. e.g.: udp://0.0.0.0:12345/10.126.126.1:23456, means forward local udp port 12345 to 10.126.126.1:23456 in the virtual network. can specify multiple."
|
||||
zh-CN: "将本地端口转发到虚拟网络中的远程端口。例如:udp://0.0.0.0:12345/10.126.126.1:23456,表示将本地UDP端口12345转发到虚拟网络中的10.126.126.1:23456。可以指定多个。"
|
||||
accept_dns:
|
||||
en: "if true, enable magic dns. with magic dns, you can access other nodes with a domain name, e.g.: <hostname>.et.net. magic dns will modify your system dns settings, enable it carefully."
|
||||
zh-CN: "如果为true,则启用魔法DNS。使用魔法DNS,您可以使用域名访问其他节点,例如:<hostname>.et.net。魔法DNS将修改您的系统DNS设置,请谨慎启用。"
|
||||
private_mode:
|
||||
en: "if true, nodes with different network names or passwords from this network are not allowed to perform handshake or relay through this node."
|
||||
zh-CN: "如果为true,则不允许使用了与本网络不相同的网络名称和密码的节点通过本节点进行握手或中转"
|
||||
|
||||
core_app:
|
||||
panic_backtrace_save:
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
use async_compression::tokio::write::{ZstdDecoder, ZstdEncoder};
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use anyhow::Context;
|
||||
use dashmap::DashMap;
|
||||
use std::cell::RefCell;
|
||||
use zstd::bulk;
|
||||
|
||||
use zerocopy::{AsBytes as _, FromBytes as _};
|
||||
|
||||
@@ -29,17 +31,19 @@ impl DefaultCompressor {
|
||||
data: &[u8],
|
||||
compress_algo: CompressorAlgo,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
let buf = match compress_algo {
|
||||
CompressorAlgo::ZstdDefault => {
|
||||
let mut o = ZstdEncoder::new(Vec::new());
|
||||
o.write_all(data).await?;
|
||||
o.shutdown().await?;
|
||||
o.into_inner()
|
||||
}
|
||||
CompressorAlgo::None => data.to_vec(),
|
||||
};
|
||||
|
||||
Ok(buf)
|
||||
match compress_algo {
|
||||
CompressorAlgo::ZstdDefault => CTX_MAP.with(|map_cell| {
|
||||
let map = map_cell.borrow();
|
||||
let mut ctx_entry = map.entry(compress_algo).or_default();
|
||||
ctx_entry.compress(data).with_context(|| {
|
||||
format!(
|
||||
"Failed to compress data with algorithm: {:?}",
|
||||
compress_algo
|
||||
)
|
||||
})
|
||||
}),
|
||||
CompressorAlgo::None => Ok(data.to_vec()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn decompress_raw(
|
||||
@@ -47,17 +51,30 @@ impl DefaultCompressor {
|
||||
data: &[u8],
|
||||
compress_algo: CompressorAlgo,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
let buf = match compress_algo {
|
||||
CompressorAlgo::ZstdDefault => {
|
||||
let mut o = ZstdDecoder::new(Vec::new());
|
||||
o.write_all(data).await?;
|
||||
o.shutdown().await?;
|
||||
o.into_inner()
|
||||
}
|
||||
CompressorAlgo::None => data.to_vec(),
|
||||
};
|
||||
|
||||
Ok(buf)
|
||||
match compress_algo {
|
||||
CompressorAlgo::ZstdDefault => DCTX_MAP.with(|map_cell| {
|
||||
let map = map_cell.borrow();
|
||||
let mut ctx_entry = map.entry(compress_algo).or_default();
|
||||
for i in 1..=5 {
|
||||
let mut len = data.len() * 2usize.pow(i);
|
||||
if i == 5 && len < 64 * 1024 {
|
||||
len = 64 * 1024; // Ensure a minimum buffer size
|
||||
}
|
||||
match ctx_entry.decompress(data, len) {
|
||||
Ok(buf) => return Ok(buf),
|
||||
Err(e) if e.to_string().contains("buffer is too small") => {
|
||||
continue; // Try with a larger buffer
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
Err(anyhow::anyhow!(
|
||||
"Failed to decompress data after multiple attempts with algorithm: {:?}",
|
||||
compress_algo
|
||||
))
|
||||
}),
|
||||
CompressorAlgo::None => Ok(data.to_vec()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -146,6 +163,11 @@ impl Compressor for DefaultCompressor {
|
||||
}
|
||||
}
|
||||
|
||||
thread_local! {
|
||||
static CTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Compressor<'static>>> = RefCell::new(DashMap::new());
|
||||
static DCTX_MAP: RefCell<DashMap<CompressorAlgo, bulk::Decompressor<'static>>> = RefCell::new(DashMap::new());
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
@@ -158,10 +180,21 @@ pub mod tests {
|
||||
|
||||
let compressor = DefaultCompressor {};
|
||||
|
||||
println!(
|
||||
"Uncompressed packet: {:?}, len: {}",
|
||||
packet,
|
||||
packet.payload_len()
|
||||
);
|
||||
|
||||
compressor
|
||||
.compress(&mut packet, CompressorAlgo::ZstdDefault)
|
||||
.await
|
||||
.unwrap();
|
||||
println!(
|
||||
"Compressed packet: {:?}, len: {}",
|
||||
packet,
|
||||
packet.payload_len()
|
||||
);
|
||||
assert_eq!(packet.peer_manager_header().unwrap().is_compressed(), true);
|
||||
|
||||
compressor.decompress(&mut packet).await.unwrap();
|
||||
|
||||
@@ -2,9 +2,11 @@ use std::{
|
||||
net::{Ipv4Addr, SocketAddr},
|
||||
path::PathBuf,
|
||||
sync::{Arc, Mutex},
|
||||
u64,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use cidr::IpCidr;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
@@ -37,6 +39,10 @@ pub fn gen_default_flags() -> Flags {
|
||||
disable_kcp_input: false,
|
||||
disable_relay_kcp: true,
|
||||
accept_dns: false,
|
||||
private_mode: false,
|
||||
enable_quic_proxy: false,
|
||||
disable_quic_input: false,
|
||||
foreign_relay_bps_limit: u64::MAX,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,20 +66,15 @@ pub trait ConfigLoader: Send + Sync {
|
||||
fn get_dhcp(&self) -> bool;
|
||||
fn set_dhcp(&self, dhcp: bool);
|
||||
|
||||
fn add_proxy_cidr(&self, cidr: cidr::IpCidr);
|
||||
fn remove_proxy_cidr(&self, cidr: cidr::IpCidr);
|
||||
fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr>;
|
||||
fn add_proxy_cidr(&self, cidr: cidr::Ipv4Cidr, mapped_cidr: Option<cidr::Ipv4Cidr>);
|
||||
fn remove_proxy_cidr(&self, cidr: cidr::Ipv4Cidr);
|
||||
fn get_proxy_cidrs(&self) -> Vec<ProxyNetworkConfig>;
|
||||
|
||||
fn get_network_identity(&self) -> NetworkIdentity;
|
||||
fn set_network_identity(&self, identity: NetworkIdentity);
|
||||
|
||||
fn get_listener_uris(&self) -> Vec<url::Url>;
|
||||
|
||||
fn get_file_logger_config(&self) -> FileLoggerConfig;
|
||||
fn set_file_logger_config(&self, config: FileLoggerConfig);
|
||||
fn get_console_logger_config(&self) -> ConsoleLoggerConfig;
|
||||
fn set_console_logger_config(&self, config: ConsoleLoggerConfig);
|
||||
|
||||
fn get_peers(&self) -> Vec<PeerConfig>;
|
||||
fn set_peers(&self, peers: Vec<PeerConfig>);
|
||||
|
||||
@@ -86,6 +87,9 @@ pub trait ConfigLoader: Send + Sync {
|
||||
fn get_rpc_portal(&self) -> Option<SocketAddr>;
|
||||
fn set_rpc_portal(&self, addr: SocketAddr);
|
||||
|
||||
fn get_rpc_portal_whitelist(&self) -> Option<Vec<IpCidr>>;
|
||||
fn set_rpc_portal_whitelist(&self, whitelist: Option<Vec<IpCidr>>);
|
||||
|
||||
fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig>;
|
||||
fn set_vpn_portal_config(&self, config: VpnPortalConfig);
|
||||
|
||||
@@ -107,6 +111,12 @@ pub trait ConfigLoader: Send + Sync {
|
||||
fn dump(&self) -> String;
|
||||
}
|
||||
|
||||
pub trait LoggingConfigLoader {
|
||||
fn get_file_logger_config(&self) -> FileLoggerConfig;
|
||||
|
||||
fn get_console_logger_config(&self) -> ConsoleLoggerConfig;
|
||||
}
|
||||
|
||||
pub type NetworkSecretDigest = [u8; 32];
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, Default, Eq, Hash)]
|
||||
@@ -165,7 +175,8 @@ pub struct PeerConfig {
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
|
||||
pub struct ProxyNetworkConfig {
|
||||
pub cidr: String,
|
||||
pub cidr: cidr::Ipv4Cidr, // the CIDR of the proxy network
|
||||
pub mapped_cidr: Option<cidr::Ipv4Cidr>, // allow remap the proxy CIDR to another CIDR
|
||||
pub allow: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
@@ -181,6 +192,24 @@ pub struct ConsoleLoggerConfig {
|
||||
pub level: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, derive_builder::Builder)]
|
||||
pub struct LoggingConfig {
|
||||
#[builder(setter(into, strip_option), default = None)]
|
||||
file_logger: Option<FileLoggerConfig>,
|
||||
#[builder(setter(into, strip_option), default = None)]
|
||||
console_logger: Option<ConsoleLoggerConfig>,
|
||||
}
|
||||
|
||||
impl LoggingConfigLoader for &LoggingConfig {
|
||||
fn get_file_logger_config(&self) -> FileLoggerConfig {
|
||||
self.file_logger.clone().unwrap_or_default()
|
||||
}
|
||||
|
||||
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
|
||||
self.console_logger.clone().unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
|
||||
pub struct VpnPortalConfig {
|
||||
pub client_cidr: cidr::Ipv4Cidr,
|
||||
@@ -238,10 +267,8 @@ struct Config {
|
||||
peer: Option<Vec<PeerConfig>>,
|
||||
proxy_network: Option<Vec<ProxyNetworkConfig>>,
|
||||
|
||||
file_logger: Option<FileLoggerConfig>,
|
||||
console_logger: Option<ConsoleLoggerConfig>,
|
||||
|
||||
rpc_portal: Option<SocketAddr>,
|
||||
rpc_portal_whitelist: Option<Vec<IpCidr>>,
|
||||
|
||||
vpn_portal_config: Option<VpnPortalConfig>,
|
||||
|
||||
@@ -396,50 +423,52 @@ impl ConfigLoader for TomlConfigLoader {
|
||||
self.config.lock().unwrap().dhcp = Some(dhcp);
|
||||
}
|
||||
|
||||
fn add_proxy_cidr(&self, cidr: cidr::IpCidr) {
|
||||
fn add_proxy_cidr(&self, cidr: cidr::Ipv4Cidr, mapped_cidr: Option<cidr::Ipv4Cidr>) {
|
||||
let mut locked_config = self.config.lock().unwrap();
|
||||
if locked_config.proxy_network.is_none() {
|
||||
locked_config.proxy_network = Some(vec![]);
|
||||
}
|
||||
let cidr_str = cidr.to_string();
|
||||
if let Some(mapped_cidr) = mapped_cidr.as_ref() {
|
||||
assert_eq!(
|
||||
cidr.network_length(),
|
||||
mapped_cidr.network_length(),
|
||||
"Mapped CIDR must have the same network length as the original CIDR",
|
||||
);
|
||||
}
|
||||
// insert if no duplicate
|
||||
if !locked_config
|
||||
.proxy_network
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.any(|c| c.cidr == cidr_str)
|
||||
.any(|c| c.cidr == cidr && c.mapped_cidr == mapped_cidr)
|
||||
{
|
||||
locked_config
|
||||
.proxy_network
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.push(ProxyNetworkConfig {
|
||||
cidr: cidr_str,
|
||||
cidr,
|
||||
mapped_cidr,
|
||||
allow: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_proxy_cidr(&self, cidr: cidr::IpCidr) {
|
||||
fn remove_proxy_cidr(&self, cidr: cidr::Ipv4Cidr) {
|
||||
let mut locked_config = self.config.lock().unwrap();
|
||||
if let Some(proxy_cidrs) = &mut locked_config.proxy_network {
|
||||
let cidr_str = cidr.to_string();
|
||||
proxy_cidrs.retain(|c| c.cidr != cidr_str);
|
||||
proxy_cidrs.retain(|c| c.cidr != cidr);
|
||||
}
|
||||
}
|
||||
|
||||
fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr> {
|
||||
fn get_proxy_cidrs(&self) -> Vec<ProxyNetworkConfig> {
|
||||
self.config
|
||||
.lock()
|
||||
.unwrap()
|
||||
.proxy_network
|
||||
.as_ref()
|
||||
.map(|v| {
|
||||
v.iter()
|
||||
.map(|c| c.cidr.parse().unwrap())
|
||||
.collect::<Vec<cidr::IpCidr>>()
|
||||
})
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
@@ -480,32 +509,6 @@ impl ConfigLoader for TomlConfigLoader {
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn get_file_logger_config(&self) -> FileLoggerConfig {
|
||||
self.config
|
||||
.lock()
|
||||
.unwrap()
|
||||
.file_logger
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn set_file_logger_config(&self, config: FileLoggerConfig) {
|
||||
self.config.lock().unwrap().file_logger = Some(config);
|
||||
}
|
||||
|
||||
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
|
||||
self.config
|
||||
.lock()
|
||||
.unwrap()
|
||||
.console_logger
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn set_console_logger_config(&self, config: ConsoleLoggerConfig) {
|
||||
self.config.lock().unwrap().console_logger = Some(config);
|
||||
}
|
||||
|
||||
fn get_peers(&self) -> Vec<PeerConfig> {
|
||||
self.config.lock().unwrap().peer.clone().unwrap_or_default()
|
||||
}
|
||||
@@ -543,6 +546,14 @@ impl ConfigLoader for TomlConfigLoader {
|
||||
self.config.lock().unwrap().rpc_portal = Some(addr);
|
||||
}
|
||||
|
||||
fn get_rpc_portal_whitelist(&self) -> Option<Vec<IpCidr>> {
|
||||
self.config.lock().unwrap().rpc_portal_whitelist.clone()
|
||||
}
|
||||
|
||||
fn set_rpc_portal_whitelist(&self, whitelist: Option<Vec<IpCidr>>) {
|
||||
self.config.lock().unwrap().rpc_portal_whitelist = whitelist;
|
||||
}
|
||||
|
||||
fn get_vpn_portal_config(&self) -> Option<VpnPortalConfig> {
|
||||
self.config.lock().unwrap().vpn_portal_config.clone()
|
||||
}
|
||||
|
||||
@@ -1,21 +1,21 @@
|
||||
macro_rules! define_global_var {
|
||||
($name:ident, $type:ty, $init:expr) => {
|
||||
pub static $name: once_cell::sync::Lazy<tokio::sync::Mutex<$type>> =
|
||||
once_cell::sync::Lazy::new(|| tokio::sync::Mutex::new($init));
|
||||
pub static $name: once_cell::sync::Lazy<std::sync::Mutex<$type>> =
|
||||
once_cell::sync::Lazy::new(|| std::sync::Mutex::new($init));
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! use_global_var {
|
||||
($name:ident) => {
|
||||
crate::common::constants::$name.lock().await.to_owned()
|
||||
crate::common::constants::$name.lock().unwrap().to_owned()
|
||||
};
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! set_global_var {
|
||||
($name:ident, $val:expr) => {
|
||||
*crate::common::constants::$name.lock().await = $val
|
||||
*crate::common::constants::$name.lock().unwrap() = $val
|
||||
};
|
||||
}
|
||||
|
||||
@@ -23,6 +23,8 @@ define_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS, u64, 1000);
|
||||
|
||||
define_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, u64, 10);
|
||||
|
||||
define_global_var!(MACHINE_UID, Option<String>, None);
|
||||
|
||||
pub const UDP_HOLE_PUNCH_CONNECTOR_SERVICE_ID: u32 = 2;
|
||||
|
||||
pub const WIN_SERVICE_WORK_DIR_REG_KEY: &str = "SOFTWARE\\EasyTier\\Service\\WorkDir";
|
||||
|
||||
@@ -77,6 +77,15 @@ pub async fn socket_addrs(
|
||||
.port()
|
||||
.or_else(default_port_number)
|
||||
.ok_or(Error::InvalidUrl(url.to_string()))?;
|
||||
// See https://github.com/EasyTier/EasyTier/pull/947
|
||||
let port = match port {
|
||||
0 => match url.scheme() {
|
||||
"ws" => 80,
|
||||
"wss" => 443,
|
||||
_ => port,
|
||||
},
|
||||
_ => port,
|
||||
};
|
||||
|
||||
// if host is an ip address, return it directly
|
||||
if let Ok(ip) = host.parse::<std::net::IpAddr>() {
|
||||
|
||||
@@ -4,6 +4,8 @@ use std::{
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
use crate::common::config::ProxyNetworkConfig;
|
||||
use crate::common::token_bucket::TokenBucketManager;
|
||||
use crate::proto::cli::PeerConnInfo;
|
||||
use crate::proto::common::{PeerFeatureFlag, PortForwardConfigPb};
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
@@ -59,7 +61,7 @@ pub struct GlobalCtx {
|
||||
event_bus: EventBus,
|
||||
|
||||
cached_ipv4: AtomicCell<Option<cidr::Ipv4Inet>>,
|
||||
cached_proxy_cidrs: AtomicCell<Option<Vec<cidr::IpCidr>>>,
|
||||
cached_proxy_cidrs: AtomicCell<Option<Vec<ProxyNetworkConfig>>>,
|
||||
|
||||
ip_collector: Mutex<Option<Arc<IPCollector>>>,
|
||||
|
||||
@@ -74,6 +76,10 @@ pub struct GlobalCtx {
|
||||
no_tun: bool,
|
||||
|
||||
feature_flags: AtomicCell<PeerFeatureFlag>,
|
||||
|
||||
quic_proxy_port: AtomicCell<Option<u16>>,
|
||||
|
||||
token_bucket_manager: TokenBucketManager,
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for GlobalCtx {
|
||||
@@ -136,6 +142,9 @@ impl GlobalCtx {
|
||||
no_tun,
|
||||
|
||||
feature_flags: AtomicCell::new(feature_flags),
|
||||
quic_proxy_port: AtomicCell::new(None),
|
||||
|
||||
token_bucket_manager: TokenBucketManager::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -182,29 +191,6 @@ impl GlobalCtx {
|
||||
self.cached_ipv4.store(None);
|
||||
}
|
||||
|
||||
pub fn add_proxy_cidr(&self, cidr: cidr::IpCidr) -> Result<(), std::io::Error> {
|
||||
self.config.add_proxy_cidr(cidr);
|
||||
self.cached_proxy_cidrs.store(None);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn remove_proxy_cidr(&self, cidr: cidr::IpCidr) -> Result<(), std::io::Error> {
|
||||
self.config.remove_proxy_cidr(cidr);
|
||||
self.cached_proxy_cidrs.store(None);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_proxy_cidrs(&self) -> Vec<cidr::IpCidr> {
|
||||
if let Some(proxy_cidrs) = self.cached_proxy_cidrs.take() {
|
||||
self.cached_proxy_cidrs.store(Some(proxy_cidrs.clone()));
|
||||
return proxy_cidrs;
|
||||
}
|
||||
|
||||
let ret = self.config.get_proxy_cidrs();
|
||||
self.cached_proxy_cidrs.store(Some(ret.clone()));
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn get_id(&self) -> uuid::Uuid {
|
||||
self.config.get_id()
|
||||
}
|
||||
@@ -303,6 +289,18 @@ impl GlobalCtx {
|
||||
pub fn set_feature_flags(&self, flags: PeerFeatureFlag) {
|
||||
self.feature_flags.store(flags);
|
||||
}
|
||||
|
||||
pub fn get_quic_proxy_port(&self) -> Option<u16> {
|
||||
self.quic_proxy_port.load()
|
||||
}
|
||||
|
||||
pub fn set_quic_proxy_port(&self, port: Option<u16>) {
|
||||
self.quic_proxy_port.store(port);
|
||||
}
|
||||
|
||||
pub fn token_bucket_manager(&self) -> &TokenBucketManager {
|
||||
&self.token_bucket_manager
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
||||
@@ -4,9 +4,12 @@ use std::{
|
||||
io::Write as _,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
use time::util::refresh_tz;
|
||||
use tokio::{task::JoinSet, time::timeout};
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::{set_global_var, use_global_var};
|
||||
|
||||
pub mod compressor;
|
||||
pub mod config;
|
||||
pub mod constants;
|
||||
@@ -20,13 +23,12 @@ pub mod network;
|
||||
pub mod scoped_task;
|
||||
pub mod stun;
|
||||
pub mod stun_codec_ext;
|
||||
pub mod token_bucket;
|
||||
|
||||
pub fn get_logger_timer<F: time::formatting::Formattable>(
|
||||
format: F,
|
||||
) -> tracing_subscriber::fmt::time::OffsetTime<F> {
|
||||
unsafe {
|
||||
time::util::local_offset::set_soundness(time::util::local_offset::Soundness::Unsound)
|
||||
};
|
||||
refresh_tz();
|
||||
let local_offset = time::UtcOffset::current_local_offset()
|
||||
.unwrap_or(time::UtcOffset::from_whole_seconds(0).unwrap());
|
||||
tracing_subscriber::fmt::time::OffsetTime::new(local_offset, format)
|
||||
@@ -88,7 +90,17 @@ pub fn join_joinset_background<T: Debug + Send + Sync + 'static>(
|
||||
);
|
||||
}
|
||||
|
||||
pub fn set_default_machine_id(mid: Option<String>) {
|
||||
set_global_var!(MACHINE_UID, mid);
|
||||
}
|
||||
|
||||
pub fn get_machine_id() -> uuid::Uuid {
|
||||
if let Some(default_mid) = use_global_var!(MACHINE_UID) {
|
||||
let mut b = [0u8; 16];
|
||||
crate::tunnel::generate_digest_from_str("", &default_mid, &mut b);
|
||||
return uuid::Uuid::from_bytes(b);
|
||||
}
|
||||
|
||||
// a path same as the binary
|
||||
let machine_id_file = std::env::current_exe()
|
||||
.map(|x| x.with_file_name("et_machine_id"))
|
||||
@@ -109,6 +121,9 @@ pub fn get_machine_id() -> uuid::Uuid {
|
||||
))]
|
||||
let gen_mid = machine_uid::get()
|
||||
.map(|x| {
|
||||
if x.is_empty() {
|
||||
return uuid::Uuid::new_v4();
|
||||
}
|
||||
let mut b = [0u8; 16];
|
||||
crate::tunnel::generate_digest_from_str("", x.as_str(), &mut b);
|
||||
uuid::Uuid::from_bytes(b)
|
||||
|
||||
@@ -955,9 +955,18 @@ mod tests {
|
||||
async fn test_txt_public_stun_server() {
|
||||
let stun_servers = vec!["txt:stun.easytier.cn".to_string()];
|
||||
let detector = UdpNatTypeDetector::new(stun_servers, 1);
|
||||
let ret = detector.detect_nat_type(0).await;
|
||||
println!("{:#?}, {:?}", ret, ret.as_ref().unwrap().nat_type());
|
||||
assert!(!ret.unwrap().stun_resps.is_empty());
|
||||
for _ in 0..5 {
|
||||
let ret = detector.detect_nat_type(0).await;
|
||||
println!("{:#?}, {:?}", ret, ret.as_ref().unwrap().nat_type());
|
||||
if ret.is_ok() {
|
||||
assert!(!ret.unwrap().stun_resps.is_empty());
|
||||
return;
|
||||
}
|
||||
}
|
||||
debug_assert!(
|
||||
false,
|
||||
"should not reach here, stun server should be available"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
312
easytier/src/common/token_bucket.rs
Normal file
312
easytier/src/common/token_bucket.rs
Normal file
@@ -0,0 +1,312 @@
|
||||
use atomic_shim::AtomicU64;
|
||||
use dashmap::DashMap;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::time;
|
||||
|
||||
use crate::common::scoped_task::ScopedTask;
|
||||
use crate::proto::common::LimiterConfig;
|
||||
|
||||
/// Token Bucket rate limiter using atomic operations
|
||||
pub struct TokenBucket {
|
||||
available_tokens: AtomicU64, // Current token count (atomic)
|
||||
last_refill_time: AtomicU64, // Last refill time as micros since epoch
|
||||
config: BucketConfig, // Immutable configuration
|
||||
refill_task: Mutex<Option<ScopedTask<()>>>, // Background refill task
|
||||
start_time: Instant, // Bucket creation time
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct BucketConfig {
|
||||
capacity: u64, // Maximum token capacity
|
||||
fill_rate: u64, // Tokens added per second
|
||||
refill_interval: Duration, // Time between refill operations
|
||||
}
|
||||
|
||||
impl From<LimiterConfig> for BucketConfig {
|
||||
fn from(cfg: LimiterConfig) -> Self {
|
||||
let burst_rate = 1.max(cfg.burst_rate.unwrap_or(1));
|
||||
let fill_rate = 8196.max(cfg.bps.unwrap_or(u64::MAX / burst_rate));
|
||||
let refill_interval = cfg
|
||||
.fill_duration_ms
|
||||
.map(|x| Duration::from_millis(1.max(x)))
|
||||
.unwrap_or(Duration::from_millis(10));
|
||||
BucketConfig {
|
||||
capacity: burst_rate * fill_rate,
|
||||
fill_rate: fill_rate,
|
||||
refill_interval,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TokenBucket {
|
||||
pub fn new(capacity: u64, bps: u64, refill_interval: Duration) -> Arc<Self> {
|
||||
let config = BucketConfig {
|
||||
capacity,
|
||||
fill_rate: bps,
|
||||
refill_interval,
|
||||
};
|
||||
Self::new_from_cfg(config)
|
||||
}
|
||||
|
||||
/// Creates a new Token Bucket rate limiter
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `capacity` - Bucket capacity in bytes
|
||||
/// * `bps` - Bandwidth limit in bytes per second
|
||||
/// * `refill_interval` - Refill interval (recommended 10-50ms)
|
||||
pub fn new_from_cfg(config: BucketConfig) -> Arc<Self> {
|
||||
// Create Arc instance with placeholder task
|
||||
let arc_self = Arc::new(Self {
|
||||
available_tokens: AtomicU64::new(config.capacity),
|
||||
last_refill_time: AtomicU64::new(0),
|
||||
config,
|
||||
refill_task: Mutex::new(None),
|
||||
start_time: std::time::Instant::now(),
|
||||
});
|
||||
|
||||
// Start background refill task
|
||||
let arc_clone = arc_self.clone();
|
||||
let refill_task = tokio::spawn(async move {
|
||||
let mut interval = time::interval(arc_clone.config.refill_interval);
|
||||
loop {
|
||||
interval.tick().await;
|
||||
arc_clone.refill();
|
||||
}
|
||||
});
|
||||
|
||||
// Replace placeholder task with actual one
|
||||
arc_self
|
||||
.refill_task
|
||||
.lock()
|
||||
.unwrap()
|
||||
.replace(refill_task.into());
|
||||
arc_self
|
||||
}
|
||||
|
||||
/// Internal refill method (called only by background task)
|
||||
fn refill(&self) {
|
||||
let now_micros = self.elapsed_micros();
|
||||
let prev_time = self.last_refill_time.swap(now_micros, Ordering::Acquire);
|
||||
|
||||
// Calculate elapsed time in seconds
|
||||
let elapsed_secs = (now_micros.saturating_sub(prev_time)) as f64 / 1_000_000.0;
|
||||
|
||||
// Calculate tokens to add
|
||||
let tokens_to_add = (self.config.fill_rate as f64 * elapsed_secs) as u64;
|
||||
if tokens_to_add == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Add tokens without exceeding capacity
|
||||
let mut current = self.available_tokens.load(Ordering::Relaxed);
|
||||
loop {
|
||||
let new = current
|
||||
.saturating_add(tokens_to_add)
|
||||
.min(self.config.capacity);
|
||||
match self.available_tokens.compare_exchange_weak(
|
||||
current,
|
||||
new,
|
||||
Ordering::Release,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => break,
|
||||
Err(actual) => current = actual,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculate microseconds since bucket creation
|
||||
fn elapsed_micros(&self) -> u64 {
|
||||
self.start_time.elapsed().as_micros() as u64
|
||||
}
|
||||
|
||||
/// Attempt to consume tokens without blocking
|
||||
///
|
||||
/// # Returns
|
||||
/// `true` if tokens were consumed, `false` if insufficient tokens
|
||||
pub fn try_consume(&self, tokens: u64) -> bool {
|
||||
// Fast path for oversized packets
|
||||
if tokens > self.config.capacity {
|
||||
return false;
|
||||
}
|
||||
|
||||
let mut current = self.available_tokens.load(Ordering::Relaxed);
|
||||
loop {
|
||||
if current < tokens {
|
||||
return false;
|
||||
}
|
||||
|
||||
let new = current - tokens;
|
||||
match self.available_tokens.compare_exchange_weak(
|
||||
current,
|
||||
new,
|
||||
Ordering::AcqRel,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => return true,
|
||||
Err(actual) => current = actual,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TokenBucketManager {
|
||||
buckets: Arc<DashMap<String, Arc<TokenBucket>>>,
|
||||
|
||||
retain_task: ScopedTask<()>,
|
||||
}
|
||||
|
||||
impl TokenBucketManager {
|
||||
/// Creates a new TokenBucketManager
|
||||
pub fn new() -> Self {
|
||||
let buckets = Arc::new(DashMap::new());
|
||||
|
||||
let buckets_clone = buckets.clone();
|
||||
let retain_task = tokio::spawn(async move {
|
||||
loop {
|
||||
// Retain only buckets that are still in use
|
||||
buckets_clone.retain(|_, bucket| Arc::<TokenBucket>::strong_count(bucket) <= 1);
|
||||
// Sleep for a while before next retention check
|
||||
tokio::time::sleep(Duration::from_secs(60)).await;
|
||||
}
|
||||
});
|
||||
|
||||
Self {
|
||||
buckets,
|
||||
retain_task: retain_task.into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get or create a token bucket for the given key
|
||||
pub fn get_or_create(&self, key: &str, cfg: BucketConfig) -> Arc<TokenBucket> {
|
||||
self.buckets
|
||||
.entry(key.to_string())
|
||||
.or_insert_with(|| TokenBucket::new_from_cfg(cfg))
|
||||
.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tokio::time::{sleep, Duration};
|
||||
|
||||
/// Test initial state after creation
|
||||
#[tokio::test]
|
||||
async fn test_initial_state() {
|
||||
let bucket = TokenBucket::new(1000, 1000, Duration::from_millis(10));
|
||||
|
||||
// Should have full capacity initially
|
||||
assert!(bucket.try_consume(1000));
|
||||
assert!(!bucket.try_consume(1)); // Should be empty now
|
||||
}
|
||||
|
||||
/// Test token consumption behavior
|
||||
#[tokio::test]
|
||||
async fn test_consumption() {
|
||||
let bucket = TokenBucket::new(1500, 1000, Duration::from_millis(10));
|
||||
|
||||
// First packet should succeed
|
||||
assert!(bucket.try_consume(1000));
|
||||
|
||||
// Second packet should fail (only 500 left)
|
||||
assert!(!bucket.try_consume(600));
|
||||
|
||||
// Should be able to take remaining tokens
|
||||
assert!(bucket.try_consume(500));
|
||||
}
|
||||
|
||||
/// Test background refill functionality
|
||||
#[tokio::test]
|
||||
async fn test_refill() {
|
||||
let bucket = TokenBucket::new(1000, 1000, Duration::from_millis(10));
|
||||
|
||||
// Drain the bucket
|
||||
assert!(bucket.try_consume(1000));
|
||||
assert!(!bucket.try_consume(1));
|
||||
|
||||
// Wait for refill (1 refill interval + buffer)
|
||||
sleep(Duration::from_millis(25)).await;
|
||||
|
||||
// Should have approximately 20 tokens (1000 tokens/s * 0.02s)
|
||||
assert!(bucket.try_consume(15));
|
||||
assert!(!bucket.try_consume(10)); // But not full capacity
|
||||
}
|
||||
|
||||
/// Test capacity enforcement
|
||||
#[tokio::test]
|
||||
async fn test_capacity_limit() {
|
||||
let bucket = TokenBucket::new(500, 1000, Duration::from_millis(10));
|
||||
|
||||
// Wait longer than refill interval
|
||||
sleep(Duration::from_millis(50)).await;
|
||||
|
||||
// Should not exceed capacity despite time passed
|
||||
assert!(bucket.try_consume(500));
|
||||
assert!(!bucket.try_consume(1));
|
||||
}
|
||||
|
||||
/// Test high load with concurrent access
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_access() {
|
||||
let bucket = TokenBucket::new(10_000, 1_000_000, Duration::from_millis(10));
|
||||
let mut handles = vec![];
|
||||
|
||||
// Spawn 100 tasks to consume tokens concurrently
|
||||
for _ in 0..100 {
|
||||
let bucket = bucket.clone();
|
||||
handles.push(tokio::spawn(async move {
|
||||
for _ in 0..100 {
|
||||
let _ = bucket.try_consume(10);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
// Wait for all tasks to complete
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
|
||||
// Verify we didn't exceed capacity
|
||||
let tokens_left = bucket.available_tokens.load(Ordering::Relaxed);
|
||||
assert!(
|
||||
tokens_left <= 10_000,
|
||||
"Tokens exceeded capacity: {}",
|
||||
tokens_left
|
||||
);
|
||||
}
|
||||
|
||||
/// Test behavior when packet size exceeds capacity
|
||||
#[tokio::test]
|
||||
async fn test_oversized_packet() {
|
||||
let bucket = TokenBucket::new(1500, 1000, Duration::from_millis(10));
|
||||
|
||||
// Packet larger than capacity should be rejected
|
||||
assert!(!bucket.try_consume(1600));
|
||||
|
||||
// Regular packets should still work
|
||||
assert!(bucket.try_consume(1000));
|
||||
}
|
||||
|
||||
/// Test refill precision with small intervals
|
||||
#[tokio::test]
|
||||
async fn test_refill_precision() {
|
||||
let bucket = TokenBucket::new(10_000, 10_000, Duration::from_micros(100)); // 100μs interval
|
||||
|
||||
// Drain most tokens
|
||||
assert!(bucket.try_consume(9900));
|
||||
|
||||
// Wait for multiple refills
|
||||
sleep(Duration::from_millis(1)).await;
|
||||
|
||||
// Should have accumulated about 100 tokens (10,000 tokens/s * 0.001s)
|
||||
let tokens = bucket.available_tokens.load(Ordering::Relaxed);
|
||||
assert!(
|
||||
tokens >= 100 && tokens <= 200,
|
||||
"Unexpected token count: {}",
|
||||
tokens
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -186,7 +186,7 @@ impl DirectConnectorManagerData {
|
||||
.await?;
|
||||
|
||||
// NOTICE: must add as directly connected tunnel
|
||||
self.peer_manager.add_direct_tunnel(ret).await
|
||||
self.peer_manager.add_client_tunnel(ret, true).await
|
||||
}
|
||||
|
||||
async fn do_try_connect_to_ip(&self, dst_peer_id: PeerId, addr: String) -> Result<(), Error> {
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
use std::{collections::BTreeSet, sync::Arc};
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
sync::{Arc, Weak},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use dashmap::{DashMap, DashSet};
|
||||
@@ -12,7 +15,7 @@ use tokio::{
|
||||
};
|
||||
|
||||
use crate::{
|
||||
common::PeerId,
|
||||
common::{join_joinset_background, PeerId},
|
||||
peers::peer_conn::PeerConnId,
|
||||
proto::{
|
||||
cli::{
|
||||
@@ -53,7 +56,7 @@ struct ReconnResult {
|
||||
struct ConnectorManagerData {
|
||||
connectors: ConnectorMap,
|
||||
reconnecting: DashSet<String>,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
peer_manager: Weak<PeerManager>,
|
||||
alive_conn_urls: Arc<DashSet<String>>,
|
||||
// user removed connector urls
|
||||
removed_conn_urls: Arc<DashSet<String>>,
|
||||
@@ -78,7 +81,7 @@ impl ManualConnectorManager {
|
||||
data: Arc::new(ConnectorManagerData {
|
||||
connectors,
|
||||
reconnecting: DashSet::new(),
|
||||
peer_manager,
|
||||
peer_manager: Arc::downgrade(&peer_manager),
|
||||
alive_conn_urls: Arc::new(DashSet::new()),
|
||||
removed_conn_urls: Arc::new(DashSet::new()),
|
||||
net_ns: global_ctx.net_ns.clone(),
|
||||
@@ -190,20 +193,18 @@ impl ManualConnectorManager {
|
||||
tracing::warn!("event_recv lagged: {}, rebuild alive conn list", n);
|
||||
event_recv = event_recv.resubscribe();
|
||||
data.alive_conn_urls.clear();
|
||||
for x in data
|
||||
.peer_manager
|
||||
.get_peer_map()
|
||||
.get_alive_conns()
|
||||
.iter()
|
||||
.map(|x| {
|
||||
x.tunnel
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.remote_addr
|
||||
.unwrap_or_default()
|
||||
.to_string()
|
||||
})
|
||||
{
|
||||
let Some(pm) = data.peer_manager.upgrade() else {
|
||||
tracing::warn!("peer manager is gone, exit");
|
||||
break;
|
||||
};
|
||||
for x in pm.get_peer_map().get_alive_conns().iter().map(|x| {
|
||||
x.tunnel
|
||||
.clone()
|
||||
.unwrap_or_default()
|
||||
.remote_addr
|
||||
.unwrap_or_default()
|
||||
.to_string()
|
||||
}) {
|
||||
data.alive_conn_urls.insert(x);
|
||||
}
|
||||
continue;
|
||||
@@ -222,6 +223,8 @@ impl ManualConnectorManager {
|
||||
use_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS),
|
||||
));
|
||||
let (reconn_result_send, mut reconn_result_recv) = mpsc::channel(100);
|
||||
let tasks = Arc::new(std::sync::Mutex::new(JoinSet::new()));
|
||||
join_joinset_background(tasks.clone(), "connector_reconnect_tasks".to_string());
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
@@ -237,7 +240,7 @@ impl ManualConnectorManager {
|
||||
let insert_succ = data.reconnecting.insert(dead_url.clone());
|
||||
assert!(insert_succ);
|
||||
|
||||
tokio::spawn(async move {
|
||||
tasks.lock().unwrap().spawn(async move {
|
||||
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await;
|
||||
sender.send(reconn_ret).await.unwrap();
|
||||
|
||||
@@ -340,8 +343,13 @@ impl ManualConnectorManager {
|
||||
connector.lock().await.remote_url().clone(),
|
||||
));
|
||||
tracing::info!("reconnect try connect... conn: {:?}", connector);
|
||||
let (peer_id, conn_id) = data
|
||||
.peer_manager
|
||||
let Some(pm) = data.peer_manager.upgrade() else {
|
||||
return Err(Error::AnyhowError(anyhow::anyhow!(
|
||||
"peer manager is gone, cannot reconnect"
|
||||
)));
|
||||
};
|
||||
|
||||
let (peer_id, conn_id) = pm
|
||||
.try_direct_connect(connector.lock().await.as_mut())
|
||||
.await?;
|
||||
tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url);
|
||||
|
||||
@@ -56,8 +56,8 @@ impl From<NatType> for UdpNatType {
|
||||
fn from(nat_type: NatType) -> Self {
|
||||
match nat_type {
|
||||
NatType::Unknown => UdpNatType::Unknown,
|
||||
NatType::NoPat | NatType::OpenInternet => UdpNatType::Open(nat_type),
|
||||
NatType::FullCone | NatType::Restricted | NatType::PortRestricted => {
|
||||
NatType::OpenInternet => UdpNatType::Open(nat_type),
|
||||
NatType::NoPat | NatType::FullCone | NatType::Restricted | NatType::PortRestricted => {
|
||||
UdpNatType::Cone(nat_type)
|
||||
}
|
||||
NatType::Symmetric | NatType::SymUdpFirewall => UdpNatType::HardSymmetric(nat_type),
|
||||
|
||||
@@ -221,7 +221,7 @@ impl UdpHoePunchConnectorData {
|
||||
Ok(Some(tunnel)) => {
|
||||
tracing::info!(?tunnel, "hole punching get tunnel success");
|
||||
|
||||
if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel).await {
|
||||
if let Err(e) = self.peer_mgr.add_client_tunnel(tunnel, false).await {
|
||||
tracing::warn!(?e, "add client tunnel failed");
|
||||
op(true);
|
||||
false
|
||||
|
||||
@@ -1083,7 +1083,8 @@ async fn main() -> Result<(), Error> {
|
||||
.iter()
|
||||
.map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,))
|
||||
.collect::<Vec<_>>();
|
||||
let direct_peers: Vec<_> = v.direct_peers
|
||||
let direct_peers: Vec<_> = v
|
||||
.direct_peers
|
||||
.iter()
|
||||
.map(|(k, v)| DirectPeerItem {
|
||||
node_id: k.to_string(),
|
||||
@@ -1257,23 +1258,14 @@ async fn main() -> Result<(), Error> {
|
||||
}
|
||||
SubCommand::Proxy => {
|
||||
let mut entries = vec![];
|
||||
let client = handler.get_tcp_proxy_client("tcp").await?;
|
||||
let ret = client
|
||||
.list_tcp_proxy_entry(BaseController::default(), Default::default())
|
||||
.await;
|
||||
entries.extend(ret.unwrap_or_default().entries);
|
||||
|
||||
let client = handler.get_tcp_proxy_client("kcp_src").await?;
|
||||
let ret = client
|
||||
.list_tcp_proxy_entry(BaseController::default(), Default::default())
|
||||
.await;
|
||||
entries.extend(ret.unwrap_or_default().entries);
|
||||
|
||||
let client = handler.get_tcp_proxy_client("kcp_dst").await?;
|
||||
let ret = client
|
||||
.list_tcp_proxy_entry(BaseController::default(), Default::default())
|
||||
.await;
|
||||
entries.extend(ret.unwrap_or_default().entries);
|
||||
for client_type in &["tcp", "kcp_src", "kcp_dst", "quic_src", "quic_dst"] {
|
||||
let client = handler.get_tcp_proxy_client(client_type).await?;
|
||||
let ret = client
|
||||
.list_tcp_proxy_entry(BaseController::default(), Default::default())
|
||||
.await;
|
||||
entries.extend(ret.unwrap_or_default().entries);
|
||||
}
|
||||
|
||||
if cli.verbose {
|
||||
println!("{}", serde_json::to_string_pretty(&entries)?);
|
||||
|
||||
@@ -11,25 +11,24 @@ use std::{
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use cidr::IpCidr;
|
||||
use clap::Parser;
|
||||
|
||||
use easytier::{
|
||||
common::{
|
||||
config::{
|
||||
ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, NetworkIdentity, PeerConfig,
|
||||
PortForwardConfig, TomlConfigLoader, VpnPortalConfig,
|
||||
ConfigLoader, ConsoleLoggerConfig, FileLoggerConfig, LoggingConfigLoader,
|
||||
NetworkIdentity, PeerConfig, PortForwardConfig, TomlConfigLoader, VpnPortalConfig,
|
||||
},
|
||||
constants::EASYTIER_VERSION,
|
||||
global_ctx::{EventBusSubscriber, GlobalCtx, GlobalCtxEvent},
|
||||
scoped_task::ScopedTask,
|
||||
global_ctx::GlobalCtx,
|
||||
set_default_machine_id,
|
||||
stun::MockStunInfoCollector,
|
||||
},
|
||||
connector::{create_connector_by_url, dns_connector::DNSTunnelConnector},
|
||||
launcher,
|
||||
proto::{
|
||||
self,
|
||||
common::{CompressionAlgoPb, NatType},
|
||||
},
|
||||
connector::create_connector_by_url,
|
||||
instance_manager::NetworkInstanceManager,
|
||||
launcher::{add_proxy_network_to_config, ConfigSource},
|
||||
proto::common::{CompressionAlgoPb, NatType},
|
||||
tunnel::{IpVersion, PROTO_PORT_OFFSET},
|
||||
utils::{init_logger, setup_panic_handler},
|
||||
web_client,
|
||||
@@ -39,11 +38,11 @@ use easytier::{
|
||||
windows_service::define_windows_service!(ffi_service_main, win_service_main);
|
||||
|
||||
#[cfg(all(feature = "mimalloc", not(feature = "jemalloc")))]
|
||||
use mimalloc_rust::GlobalMiMalloc;
|
||||
use mimalloc::MiMalloc;
|
||||
|
||||
#[cfg(all(feature = "mimalloc", not(feature = "jemalloc")))]
|
||||
#[global_allocator]
|
||||
static GLOBAL_MIMALLOC: GlobalMiMalloc = GlobalMiMalloc;
|
||||
static GLOBAL_MIMALLOC: MiMalloc = MiMalloc;
|
||||
|
||||
#[cfg(feature = "jemalloc")]
|
||||
use jemalloc_ctl::{epoch, stats, Access as _, AsName as _};
|
||||
@@ -101,14 +100,32 @@ struct Cli {
|
||||
)]
|
||||
config_server: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_MACHINE_ID",
|
||||
help = t!("core_clap.machine_id").to_string()
|
||||
)]
|
||||
machine_id: Option<String>,
|
||||
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
env = "ET_CONFIG_FILE",
|
||||
help = t!("core_clap.config_file").to_string()
|
||||
value_delimiter = ',',
|
||||
help = t!("core_clap.config_file").to_string(),
|
||||
num_args = 1..,
|
||||
)]
|
||||
config_file: Option<PathBuf>,
|
||||
config_file: Option<Vec<PathBuf>>,
|
||||
|
||||
#[command(flatten)]
|
||||
network_options: NetworkOptions,
|
||||
|
||||
#[command(flatten)]
|
||||
logging_options: LoggingOptions,
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
struct NetworkOptions {
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_NETWORK_NAME",
|
||||
@@ -176,6 +193,14 @@ struct Cli {
|
||||
)]
|
||||
rpc_portal: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_RPC_PORTAL_WHITELIST",
|
||||
value_delimiter = ',',
|
||||
help = t!("core_clap.rpc_portal_whitelist").to_string(),
|
||||
)]
|
||||
rpc_portal_whitelist: Option<Vec<IpCidr>>,
|
||||
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
@@ -203,27 +228,6 @@ struct Cli {
|
||||
)]
|
||||
no_listener: bool,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_CONSOLE_LOG_LEVEL",
|
||||
help = t!("core_clap.console_log_level").to_string()
|
||||
)]
|
||||
console_log_level: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_FILE_LOG_LEVEL",
|
||||
help = t!("core_clap.file_log_level").to_string()
|
||||
)]
|
||||
file_log_level: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_FILE_LOG_DIR",
|
||||
help = t!("core_clap.file_log_dir").to_string()
|
||||
)]
|
||||
file_log_dir: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_HOSTNAME",
|
||||
@@ -437,6 +441,24 @@ struct Cli {
|
||||
)]
|
||||
disable_kcp_input: Option<bool>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_ENABLE_QUIC_PROXY",
|
||||
help = t!("core_clap.enable_quic_proxy").to_string(),
|
||||
num_args = 0..=1,
|
||||
default_missing_value = "true"
|
||||
)]
|
||||
enable_quic_proxy: Option<bool>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_DISABLE_QUIC_INPUT",
|
||||
help = t!("core_clap.disable_quic_input").to_string(),
|
||||
num_args = 0..=1,
|
||||
default_missing_value = "true"
|
||||
)]
|
||||
disable_quic_input: Option<bool>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_PORT_FORWARD",
|
||||
@@ -452,6 +474,44 @@ struct Cli {
|
||||
help = t!("core_clap.accept_dns").to_string(),
|
||||
)]
|
||||
accept_dns: Option<bool>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_PRIVATE_MODE",
|
||||
help = t!("core_clap.private_mode").to_string(),
|
||||
)]
|
||||
private_mode: Option<bool>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_FOREIGN_RELAY_BPS_LIMIT",
|
||||
help = t!("core_clap.foreign_relay_bps_limit").to_string(),
|
||||
)]
|
||||
foreign_relay_bps_limit: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
struct LoggingOptions {
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_CONSOLE_LOG_LEVEL",
|
||||
help = t!("core_clap.console_log_level").to_string()
|
||||
)]
|
||||
console_log_level: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_FILE_LOG_LEVEL",
|
||||
help = t!("core_clap.file_log_level").to_string()
|
||||
)]
|
||||
file_log_level: Option<String>,
|
||||
|
||||
#[arg(
|
||||
long,
|
||||
env = "ET_FILE_LOG_DIR",
|
||||
help = t!("core_clap.file_log_dir").to_string()
|
||||
)]
|
||||
file_log_dir: Option<String>,
|
||||
}
|
||||
|
||||
rust_i18n::i18n!("locales", fallback = "en");
|
||||
@@ -511,43 +571,47 @@ impl Cli {
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(cli: &Cli) -> Result<Self, Self::Error> {
|
||||
let cfg = if let Some(config_file) = &cli.config_file {
|
||||
TomlConfigLoader::new(config_file)
|
||||
.with_context(|| format!("failed to load config file: {:?}", cli.config_file))?
|
||||
} else {
|
||||
TomlConfigLoader::default()
|
||||
impl NetworkOptions {
|
||||
fn can_merge(&self, cfg: &TomlConfigLoader, config_file_count: usize) -> bool {
|
||||
if config_file_count == 1 {
|
||||
return true;
|
||||
}
|
||||
let Some(network_name) = &self.network_name else {
|
||||
return false;
|
||||
};
|
||||
if cfg.get_network_identity().network_name == *network_name {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
if cli.hostname.is_some() {
|
||||
cfg.set_hostname(cli.hostname.clone());
|
||||
fn merge_into(&self, cfg: &mut TomlConfigLoader) -> anyhow::Result<()> {
|
||||
if self.hostname.is_some() {
|
||||
cfg.set_hostname(self.hostname.clone());
|
||||
}
|
||||
|
||||
let old_ns = cfg.get_network_identity();
|
||||
let network_name = cli.network_name.clone().unwrap_or(old_ns.network_name);
|
||||
let network_secret = cli
|
||||
let network_name = self.network_name.clone().unwrap_or(old_ns.network_name);
|
||||
let network_secret = self
|
||||
.network_secret
|
||||
.clone()
|
||||
.unwrap_or(old_ns.network_secret.unwrap_or_default());
|
||||
cfg.set_network_identity(NetworkIdentity::new(network_name, network_secret));
|
||||
|
||||
if let Some(dhcp) = cli.dhcp {
|
||||
if let Some(dhcp) = self.dhcp {
|
||||
cfg.set_dhcp(dhcp);
|
||||
}
|
||||
|
||||
if let Some(ipv4) = &cli.ipv4 {
|
||||
if let Some(ipv4) = &self.ipv4 {
|
||||
cfg.set_ipv4(Some(ipv4.parse().with_context(|| {
|
||||
format!("failed to parse ipv4 address: {}", ipv4)
|
||||
})?))
|
||||
}
|
||||
|
||||
if !cli.peers.is_empty() {
|
||||
if !self.peers.is_empty() {
|
||||
let mut peers = cfg.get_peers();
|
||||
peers.reserve(peers.len() + cli.peers.len());
|
||||
for p in &cli.peers {
|
||||
peers.reserve(peers.len() + self.peers.len());
|
||||
for p in &self.peers {
|
||||
peers.push(PeerConfig {
|
||||
uri: p
|
||||
.parse()
|
||||
@@ -557,9 +621,9 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
cfg.set_peers(peers);
|
||||
}
|
||||
|
||||
if cli.no_listener || !cli.listeners.is_empty() {
|
||||
if self.no_listener || !self.listeners.is_empty() {
|
||||
cfg.set_listeners(
|
||||
Cli::parse_listeners(cli.no_listener, cli.listeners.clone())?
|
||||
Cli::parse_listeners(self.no_listener, self.listeners.clone())?
|
||||
.into_iter()
|
||||
.map(|s| s.parse().unwrap())
|
||||
.collect(),
|
||||
@@ -573,9 +637,9 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
);
|
||||
}
|
||||
|
||||
if !cli.mapped_listeners.is_empty() {
|
||||
if !self.mapped_listeners.is_empty() {
|
||||
cfg.set_mapped_listeners(Some(
|
||||
cli.mapped_listeners
|
||||
self.mapped_listeners
|
||||
.iter()
|
||||
.map(|s| {
|
||||
s.parse()
|
||||
@@ -592,14 +656,11 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
));
|
||||
}
|
||||
|
||||
for n in cli.proxy_networks.iter() {
|
||||
cfg.add_proxy_cidr(
|
||||
n.parse()
|
||||
.with_context(|| format!("failed to parse proxy network: {}", n))?,
|
||||
);
|
||||
for n in self.proxy_networks.iter() {
|
||||
add_proxy_network_to_config(n, &cfg)?;
|
||||
}
|
||||
|
||||
let rpc_portal = if let Some(r) = &cli.rpc_portal {
|
||||
let rpc_portal = if let Some(r) = &self.rpc_portal {
|
||||
Cli::parse_rpc_portal(r.clone())
|
||||
.with_context(|| format!("failed to parse rpc portal: {}", r))?
|
||||
} else if let Some(r) = cfg.get_rpc_portal() {
|
||||
@@ -609,7 +670,9 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
};
|
||||
cfg.set_rpc_portal(rpc_portal);
|
||||
|
||||
if let Some(external_nodes) = cli.external_node.as_ref() {
|
||||
cfg.set_rpc_portal_whitelist(self.rpc_portal_whitelist.clone());
|
||||
|
||||
if let Some(external_nodes) = self.external_node.as_ref() {
|
||||
let mut old_peers = cfg.get_peers();
|
||||
old_peers.push(PeerConfig {
|
||||
uri: external_nodes.parse().with_context(|| {
|
||||
@@ -619,37 +682,11 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
cfg.set_peers(old_peers);
|
||||
}
|
||||
|
||||
if cli.console_log_level.is_some() {
|
||||
cfg.set_console_logger_config(ConsoleLoggerConfig {
|
||||
level: cli.console_log_level.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(inst_name) = &cli.instance_name {
|
||||
if let Some(inst_name) = &self.instance_name {
|
||||
cfg.set_inst_name(inst_name.clone());
|
||||
}
|
||||
|
||||
if cli.file_log_dir.is_some() || cli.file_log_level.is_some() {
|
||||
let inst_name = cfg.get_inst_name();
|
||||
let old_fl = cfg.get_file_logger_config();
|
||||
let file_log_dir = if cli.file_log_dir.is_some() {
|
||||
&cli.file_log_dir
|
||||
} else {
|
||||
&old_fl.dir
|
||||
};
|
||||
let file_log_level = if cli.file_log_level.is_some() {
|
||||
&cli.file_log_level
|
||||
} else {
|
||||
&old_fl.level
|
||||
};
|
||||
cfg.set_file_logger_config(FileLoggerConfig {
|
||||
level: file_log_level.clone(),
|
||||
dir: file_log_dir.clone(),
|
||||
file: Some(format!("easytier-{}", inst_name)),
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(vpn_portal) = cli.vpn_portal.as_ref() {
|
||||
if let Some(vpn_portal) = self.vpn_portal.as_ref() {
|
||||
let url: url::Url = vpn_portal
|
||||
.parse()
|
||||
.with_context(|| format!("failed to parse vpn portal url: {}", vpn_portal))?;
|
||||
@@ -669,7 +706,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(manual_routes) = cli.manual_routes.as_ref() {
|
||||
if let Some(manual_routes) = self.manual_routes.as_ref() {
|
||||
let mut routes = Vec::<cidr::Ipv4Cidr>::with_capacity(manual_routes.len());
|
||||
for r in manual_routes {
|
||||
routes.push(
|
||||
@@ -681,7 +718,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
}
|
||||
|
||||
#[cfg(feature = "socks5")]
|
||||
if let Some(socks5_proxy) = cli.socks5 {
|
||||
if let Some(socks5_proxy) = self.socks5 {
|
||||
cfg.set_socks5_portal(Some(
|
||||
format!("socks5://0.0.0.0:{}", socks5_proxy)
|
||||
.parse()
|
||||
@@ -690,7 +727,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
}
|
||||
|
||||
#[cfg(feature = "socks5")]
|
||||
for port_forward in cli.port_forward.iter() {
|
||||
for port_forward in self.port_forward.iter() {
|
||||
let example_str = ", example: udp://0.0.0.0:12345/10.126.126.1:12345";
|
||||
|
||||
let bind_addr = format!(
|
||||
@@ -724,38 +761,38 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
}
|
||||
|
||||
let mut f = cfg.get_flags();
|
||||
if let Some(default_protocol) = &cli.default_protocol {
|
||||
if let Some(default_protocol) = &self.default_protocol {
|
||||
f.default_protocol = default_protocol.clone()
|
||||
};
|
||||
if let Some(v) = cli.disable_encryption {
|
||||
if let Some(v) = self.disable_encryption {
|
||||
f.enable_encryption = !v;
|
||||
}
|
||||
if let Some(v) = cli.disable_ipv6 {
|
||||
if let Some(v) = self.disable_ipv6 {
|
||||
f.enable_ipv6 = !v;
|
||||
}
|
||||
f.latency_first = cli.latency_first.unwrap_or(f.latency_first);
|
||||
if let Some(dev_name) = &cli.dev_name {
|
||||
f.latency_first = self.latency_first.unwrap_or(f.latency_first);
|
||||
if let Some(dev_name) = &self.dev_name {
|
||||
f.dev_name = dev_name.clone()
|
||||
}
|
||||
if let Some(mtu) = cli.mtu {
|
||||
if let Some(mtu) = self.mtu {
|
||||
f.mtu = mtu as u32;
|
||||
}
|
||||
f.enable_exit_node = cli.enable_exit_node.unwrap_or(f.enable_exit_node);
|
||||
f.proxy_forward_by_system = cli
|
||||
f.enable_exit_node = self.enable_exit_node.unwrap_or(f.enable_exit_node);
|
||||
f.proxy_forward_by_system = self
|
||||
.proxy_forward_by_system
|
||||
.unwrap_or(f.proxy_forward_by_system);
|
||||
f.no_tun = cli.no_tun.unwrap_or(f.no_tun) || cfg!(not(feature = "tun"));
|
||||
f.use_smoltcp = cli.use_smoltcp.unwrap_or(f.use_smoltcp);
|
||||
if let Some(wl) = cli.relay_network_whitelist.as_ref() {
|
||||
f.no_tun = self.no_tun.unwrap_or(f.no_tun) || cfg!(not(feature = "tun"));
|
||||
f.use_smoltcp = self.use_smoltcp.unwrap_or(f.use_smoltcp);
|
||||
if let Some(wl) = self.relay_network_whitelist.as_ref() {
|
||||
f.relay_network_whitelist = wl.join(" ");
|
||||
}
|
||||
f.disable_p2p = cli.disable_p2p.unwrap_or(f.disable_p2p);
|
||||
f.disable_udp_hole_punching = cli
|
||||
f.disable_p2p = self.disable_p2p.unwrap_or(f.disable_p2p);
|
||||
f.disable_udp_hole_punching = self
|
||||
.disable_udp_hole_punching
|
||||
.unwrap_or(f.disable_udp_hole_punching);
|
||||
f.relay_all_peer_rpc = cli.relay_all_peer_rpc.unwrap_or(f.relay_all_peer_rpc);
|
||||
f.multi_thread = cli.multi_thread.unwrap_or(f.multi_thread);
|
||||
if let Some(compression) = &cli.compression {
|
||||
f.relay_all_peer_rpc = self.relay_all_peer_rpc.unwrap_or(f.relay_all_peer_rpc);
|
||||
f.multi_thread = self.multi_thread.unwrap_or(f.multi_thread);
|
||||
if let Some(compression) = &self.compression {
|
||||
f.data_compress_algo = match compression.as_str() {
|
||||
"none" => CompressionAlgoPb::None,
|
||||
"zstd" => CompressionAlgoPb::Zstd,
|
||||
@@ -766,153 +803,40 @@ impl TryFrom<&Cli> for TomlConfigLoader {
|
||||
}
|
||||
.into();
|
||||
}
|
||||
f.bind_device = cli.bind_device.unwrap_or(f.bind_device);
|
||||
f.enable_kcp_proxy = cli.enable_kcp_proxy.unwrap_or(f.enable_kcp_proxy);
|
||||
f.disable_kcp_input = cli.disable_kcp_input.unwrap_or(f.disable_kcp_input);
|
||||
f.accept_dns = cli.accept_dns.unwrap_or(f.accept_dns);
|
||||
f.bind_device = self.bind_device.unwrap_or(f.bind_device);
|
||||
f.enable_kcp_proxy = self.enable_kcp_proxy.unwrap_or(f.enable_kcp_proxy);
|
||||
f.disable_kcp_input = self.disable_kcp_input.unwrap_or(f.disable_kcp_input);
|
||||
f.enable_quic_proxy = self.enable_quic_proxy.unwrap_or(f.enable_quic_proxy);
|
||||
f.disable_quic_input = self.disable_quic_input.unwrap_or(f.disable_quic_input);
|
||||
f.accept_dns = self.accept_dns.unwrap_or(f.accept_dns);
|
||||
f.private_mode = self.private_mode.unwrap_or(f.private_mode);
|
||||
f.foreign_relay_bps_limit = self
|
||||
.foreign_relay_bps_limit
|
||||
.unwrap_or(f.foreign_relay_bps_limit);
|
||||
cfg.set_flags(f);
|
||||
|
||||
if !cli.exit_nodes.is_empty() {
|
||||
cfg.set_exit_nodes(cli.exit_nodes.clone());
|
||||
if !self.exit_nodes.is_empty() {
|
||||
cfg.set_exit_nodes(self.exit_nodes.clone());
|
||||
}
|
||||
|
||||
Ok(cfg)
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn print_event(msg: String) {
|
||||
println!(
|
||||
"{}: {}",
|
||||
chrono::Local::now().format("%Y-%m-%d %H:%M:%S"),
|
||||
msg
|
||||
);
|
||||
}
|
||||
|
||||
fn peer_conn_info_to_string(p: proto::cli::PeerConnInfo) -> String {
|
||||
format!(
|
||||
"my_peer_id: {}, dst_peer_id: {}, tunnel_info: {:?}",
|
||||
p.my_peer_id, p.peer_id, p.tunnel
|
||||
)
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
pub fn handle_event(mut events: EventBusSubscriber) -> tokio::task::JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if let Ok(e) = events.recv().await {
|
||||
match e {
|
||||
GlobalCtxEvent::PeerAdded(p) => {
|
||||
print_event(format!("new peer added. peer_id: {}", p));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PeerRemoved(p) => {
|
||||
print_event(format!("peer removed. peer_id: {}", p));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PeerConnAdded(p) => {
|
||||
print_event(format!(
|
||||
"new peer connection added. conn_info: {}",
|
||||
peer_conn_info_to_string(p)
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PeerConnRemoved(p) => {
|
||||
print_event(format!(
|
||||
"peer connection removed. conn_info: {}",
|
||||
peer_conn_info_to_string(p)
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ListenerAddFailed(p, msg) => {
|
||||
print_event(format!(
|
||||
"listener add failed. listener: {}, msg: {}",
|
||||
p, msg
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ListenerAcceptFailed(p, msg) => {
|
||||
print_event(format!(
|
||||
"listener accept failed. listener: {}, msg: {}",
|
||||
p, msg
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ListenerAdded(p) => {
|
||||
if p.scheme() == "ring" {
|
||||
continue;
|
||||
}
|
||||
print_event(format!("new listener added. listener: {}", p));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ConnectionAccepted(local, remote) => {
|
||||
print_event(format!(
|
||||
"new connection accepted. local: {}, remote: {}",
|
||||
local, remote
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ConnectionError(local, remote, err) => {
|
||||
print_event(format!(
|
||||
"connection error. local: {}, remote: {}, err: {}",
|
||||
local, remote, err
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::TunDeviceReady(dev) => {
|
||||
print_event(format!("tun device ready. dev: {}", dev));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::TunDeviceError(err) => {
|
||||
print_event(format!("tun device error. err: {}", err));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::Connecting(dst) => {
|
||||
print_event(format!("connecting to peer. dst: {}", dst));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ConnectError(dst, ip_version, err) => {
|
||||
print_event(format!(
|
||||
"connect to peer error. dst: {}, ip_version: {}, err: {}",
|
||||
dst, ip_version, err
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::VpnPortalClientConnected(portal, client_addr) => {
|
||||
print_event(format!(
|
||||
"vpn portal client connected. portal: {}, client_addr: {}",
|
||||
portal, client_addr
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::VpnPortalClientDisconnected(portal, client_addr) => {
|
||||
print_event(format!(
|
||||
"vpn portal client disconnected. portal: {}, client_addr: {}",
|
||||
portal, client_addr
|
||||
));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::DhcpIpv4Changed(old, new) => {
|
||||
print_event(format!("dhcp ip changed. old: {:?}, new: {:?}", old, new));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::DhcpIpv4Conflicted(ip) => {
|
||||
print_event(format!("dhcp ip conflict. ip: {:?}", ip));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PortForwardAdded(cfg) => {
|
||||
print_event(format!(
|
||||
"port forward added. local: {}, remote: {}, proto: {}",
|
||||
cfg.bind_addr.unwrap().to_string(),
|
||||
cfg.dst_addr.unwrap().to_string(),
|
||||
cfg.socket_type().as_str_name()
|
||||
));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
events = events.resubscribe();
|
||||
}
|
||||
impl LoggingConfigLoader for &LoggingOptions {
|
||||
fn get_console_logger_config(&self) -> ConsoleLoggerConfig {
|
||||
ConsoleLoggerConfig {
|
||||
level: self.console_log_level.clone(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn get_file_logger_config(&self) -> FileLoggerConfig {
|
||||
FileLoggerConfig {
|
||||
level: self.file_log_level.clone(),
|
||||
dir: self.file_log_dir.clone(),
|
||||
file: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
@@ -1027,10 +951,10 @@ fn win_service_main(arg: Vec<std::ffi::OsString>) {
|
||||
}
|
||||
|
||||
async fn run_main(cli: Cli) -> anyhow::Result<()> {
|
||||
let cfg = TomlConfigLoader::try_from(&cli)?;
|
||||
init_logger(&cfg, false)?;
|
||||
init_logger(&cli.logging_options, false)?;
|
||||
|
||||
if cli.config_server.is_some() {
|
||||
set_default_machine_id(cli.machine_id);
|
||||
let config_server_url_s = cli.config_server.clone().unwrap();
|
||||
let config_server_url = match url::Url::parse(&config_server_url_s) {
|
||||
Ok(u) => u,
|
||||
@@ -1069,7 +993,7 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
|
||||
let mut flags = global_ctx.get_flags();
|
||||
flags.bind_device = false;
|
||||
global_ctx.set_flags(flags);
|
||||
let hostname = match cli.hostname {
|
||||
let hostname = match cli.network_options.hostname {
|
||||
None => gethostname::gethostname().to_string_lossy().to_string(),
|
||||
Some(hostname) => hostname.to_string(),
|
||||
};
|
||||
@@ -1079,22 +1003,49 @@ async fn run_main(cli: Cli) -> anyhow::Result<()> {
|
||||
hostname,
|
||||
);
|
||||
tokio::signal::ctrl_c().await.unwrap();
|
||||
DNSTunnelConnector::new("".parse().unwrap(), global_ctx);
|
||||
return Ok(());
|
||||
}
|
||||
let manager = NetworkInstanceManager::new();
|
||||
let mut crate_cli_network =
|
||||
cli.config_file.is_none() || cli.network_options.network_name.is_some();
|
||||
if let Some(config_files) = cli.config_file {
|
||||
let config_file_count = config_files.len();
|
||||
for config_file in config_files {
|
||||
let mut cfg = TomlConfigLoader::new(&config_file)
|
||||
.with_context(|| format!("failed to load config file: {:?}", config_file))?;
|
||||
|
||||
println!("Starting easytier with config:");
|
||||
println!("############### TOML ###############\n");
|
||||
println!("{}", cfg.dump());
|
||||
println!("-----------------------------------");
|
||||
|
||||
let mut l = launcher::NetworkInstance::new(cfg).set_fetch_node_info(false);
|
||||
let _t = ScopedTask::from(handle_event(l.start().unwrap()));
|
||||
tokio::select! {
|
||||
e = l.wait() => {
|
||||
if let Some(e) = e {
|
||||
eprintln!("launcher error: {}", e);
|
||||
if cli.network_options.can_merge(&cfg, config_file_count) {
|
||||
cli.network_options.merge_into(&mut cfg).with_context(|| {
|
||||
format!("failed to merge config from cli: {:?}", config_file)
|
||||
})?;
|
||||
crate_cli_network = false;
|
||||
}
|
||||
|
||||
println!(
|
||||
"Starting easytier from config file {:?} with config:",
|
||||
config_file
|
||||
);
|
||||
println!("############### TOML ###############\n");
|
||||
println!("{}", cfg.dump());
|
||||
println!("-----------------------------------");
|
||||
manager.run_network_instance(cfg, ConfigSource::File)?;
|
||||
}
|
||||
}
|
||||
|
||||
if crate_cli_network {
|
||||
let mut cfg = TomlConfigLoader::default();
|
||||
cli.network_options
|
||||
.merge_into(&mut cfg)
|
||||
.with_context(|| format!("failed to create config from cli"))?;
|
||||
println!("Starting easytier from cli with config:");
|
||||
println!("############### TOML ###############\n");
|
||||
println!("{}", cfg.dump());
|
||||
println!("-----------------------------------");
|
||||
manager.run_network_instance(cfg, ConfigSource::Cli)?;
|
||||
}
|
||||
|
||||
tokio::select! {
|
||||
_ = manager.wait() => {
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
println!("ctrl-c received, exiting...");
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::{
|
||||
mem::MaybeUninit,
|
||||
net::{IpAddr, Ipv4Addr, SocketAddrV4},
|
||||
sync::Arc,
|
||||
sync::{Arc, Weak},
|
||||
thread,
|
||||
time::Duration,
|
||||
};
|
||||
@@ -34,7 +34,7 @@ use super::{
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
struct IcmpNatKey {
|
||||
dst_ip: std::net::IpAddr,
|
||||
real_dst_ip: std::net::IpAddr,
|
||||
icmp_id: u16,
|
||||
icmp_seq: u16,
|
||||
}
|
||||
@@ -45,15 +45,22 @@ struct IcmpNatEntry {
|
||||
my_peer_id: PeerId,
|
||||
src_ip: IpAddr,
|
||||
start_time: std::time::Instant,
|
||||
mapped_dst_ip: std::net::Ipv4Addr,
|
||||
}
|
||||
|
||||
impl IcmpNatEntry {
|
||||
fn new(src_peer_id: PeerId, my_peer_id: PeerId, src_ip: IpAddr) -> Result<Self, Error> {
|
||||
fn new(
|
||||
src_peer_id: PeerId,
|
||||
my_peer_id: PeerId,
|
||||
src_ip: IpAddr,
|
||||
mapped_dst_ip: Ipv4Addr,
|
||||
) -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
src_peer_id,
|
||||
my_peer_id,
|
||||
src_ip,
|
||||
start_time: std::time::Instant::now(),
|
||||
mapped_dst_ip,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -65,10 +72,10 @@ type NewPacketReceiver = tokio::sync::mpsc::UnboundedReceiver<IcmpNatKey>;
|
||||
#[derive(Debug)]
|
||||
pub struct IcmpProxy {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
peer_manager: Weak<PeerManager>,
|
||||
|
||||
cidr_set: CidrSet,
|
||||
socket: std::sync::Mutex<Option<socket2::Socket>>,
|
||||
socket: std::sync::Mutex<Option<Arc<socket2::Socket>>>,
|
||||
|
||||
nat_table: IcmpNatTable,
|
||||
|
||||
@@ -78,7 +85,10 @@ pub struct IcmpProxy {
|
||||
icmp_sender: Arc<std::sync::Mutex<Option<UnboundedSender<ZCPacket>>>>,
|
||||
}
|
||||
|
||||
fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, IpAddr), Error> {
|
||||
fn socket_recv(
|
||||
socket: &Socket,
|
||||
buf: &mut [MaybeUninit<u8>],
|
||||
) -> Result<(usize, IpAddr), std::io::Error> {
|
||||
let (size, addr) = socket.recv_from(buf)?;
|
||||
let addr = match addr.as_socket() {
|
||||
None => IpAddr::V4(Ipv4Addr::UNSPECIFIED),
|
||||
@@ -87,15 +97,32 @@ fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, I
|
||||
Ok((size, addr))
|
||||
}
|
||||
|
||||
fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSender<ZCPacket>) {
|
||||
fn socket_recv_loop(
|
||||
socket: Arc<Socket>,
|
||||
nat_table: IcmpNatTable,
|
||||
sender: UnboundedSender<ZCPacket>,
|
||||
) {
|
||||
let mut buf = [0u8; 8192];
|
||||
let data: &mut [MaybeUninit<u8>] = unsafe { std::mem::transmute(&mut buf[..]) };
|
||||
|
||||
loop {
|
||||
let Ok((len, peer_ip)) = socket_recv(&socket, data) else {
|
||||
continue;
|
||||
let (len, peer_ip) = match socket_recv(&socket, data) {
|
||||
Ok((len, peer_ip)) => (len, peer_ip),
|
||||
Err(e) => {
|
||||
tracing::error!("recv icmp packet failed: {:?}", e);
|
||||
if sender.is_closed() {
|
||||
break;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if len <= 0 {
|
||||
tracing::error!("recv empty packet, len: {}", len);
|
||||
return;
|
||||
}
|
||||
|
||||
if !peer_ip.is_ipv4() {
|
||||
continue;
|
||||
}
|
||||
@@ -114,7 +141,7 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe
|
||||
}
|
||||
|
||||
let key = IcmpNatKey {
|
||||
dst_ip: peer_ip,
|
||||
real_dst_ip: peer_ip,
|
||||
icmp_id: icmp_packet.get_identifier(),
|
||||
icmp_seq: icmp_packet.get_sequence_number(),
|
||||
};
|
||||
@@ -128,12 +155,11 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe
|
||||
continue;
|
||||
};
|
||||
|
||||
let src_v4 = ipv4_packet.get_source();
|
||||
let payload_len = len - ipv4_packet.get_header_length() as usize * 4;
|
||||
let id = ipv4_packet.get_identification();
|
||||
let _ = compose_ipv4_packet(
|
||||
&mut buf[..],
|
||||
&src_v4,
|
||||
&v.mapped_dst_ip,
|
||||
&dest_ip,
|
||||
IpNextHeaderProtocols::Icmp,
|
||||
payload_len,
|
||||
@@ -176,7 +202,7 @@ impl IcmpProxy {
|
||||
let cidr_set = CidrSet::new(global_ctx.clone());
|
||||
let ret = Self {
|
||||
global_ctx,
|
||||
peer_manager,
|
||||
peer_manager: Arc::downgrade(&peer_manager),
|
||||
cidr_set,
|
||||
socket: std::sync::Mutex::new(None),
|
||||
|
||||
@@ -208,7 +234,7 @@ impl IcmpProxy {
|
||||
let socket = self.create_raw_socket();
|
||||
match socket {
|
||||
Ok(socket) => {
|
||||
self.socket.lock().unwrap().replace(socket);
|
||||
self.socket.lock().unwrap().replace(Arc::new(socket));
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::warn!("create icmp socket failed: {:?}", e);
|
||||
@@ -241,7 +267,7 @@ impl IcmpProxy {
|
||||
let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
|
||||
self.icmp_sender.lock().unwrap().replace(sender.clone());
|
||||
if let Some(socket) = self.socket.lock().unwrap().as_ref() {
|
||||
let socket = socket.try_clone()?;
|
||||
let socket = socket.clone();
|
||||
let nat_table = self.nat_table.clone();
|
||||
thread::spawn(|| {
|
||||
socket_recv_loop(socket, nat_table, sender);
|
||||
@@ -254,7 +280,11 @@ impl IcmpProxy {
|
||||
while let Some(msg) = receiver.recv().await {
|
||||
let hdr = msg.peer_manager_header().unwrap();
|
||||
let to_peer_id = hdr.to_peer_id.into();
|
||||
let ret = peer_manager.send_msg(msg, to_peer_id).await;
|
||||
let Some(pm) = peer_manager.upgrade() else {
|
||||
tracing::warn!("peer manager is gone, icmp proxy send loop exit");
|
||||
return;
|
||||
};
|
||||
let ret = pm.send_msg(msg, to_peer_id).await;
|
||||
if ret.is_err() {
|
||||
tracing::error!("send icmp packet to peer failed: {:?}", ret);
|
||||
}
|
||||
@@ -271,9 +301,12 @@ impl IcmpProxy {
|
||||
}
|
||||
});
|
||||
|
||||
self.peer_manager
|
||||
.add_packet_process_pipeline(Box::new(self.clone()))
|
||||
.await;
|
||||
let Some(pm) = self.peer_manager.upgrade() else {
|
||||
tracing::warn!("peer manager is gone, icmp proxy init failed");
|
||||
return Err(anyhow::anyhow!("peer manager is gone").into());
|
||||
};
|
||||
|
||||
pm.add_packet_process_pipeline(Box::new(self.clone())).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -361,7 +394,11 @@ impl IcmpProxy {
|
||||
return None;
|
||||
}
|
||||
|
||||
if !self.cidr_set.contains_v4(ipv4.get_destination())
|
||||
let mut real_dst_ip = ipv4.get_destination();
|
||||
|
||||
if !self
|
||||
.cidr_set
|
||||
.contains_v4(ipv4.get_destination(), &mut real_dst_ip)
|
||||
&& !is_exit_node
|
||||
&& !(self.global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination())
|
||||
@@ -416,7 +453,7 @@ impl IcmpProxy {
|
||||
let icmp_seq = icmp_packet.get_sequence_number();
|
||||
|
||||
let key = IcmpNatKey {
|
||||
dst_ip: ipv4.get_destination().into(),
|
||||
real_dst_ip: real_dst_ip.into(),
|
||||
icmp_id,
|
||||
icmp_seq,
|
||||
};
|
||||
@@ -425,6 +462,7 @@ impl IcmpProxy {
|
||||
hdr.from_peer_id.into(),
|
||||
hdr.to_peer_id.into(),
|
||||
ipv4.get_source().into(),
|
||||
ipv4.get_destination(),
|
||||
)
|
||||
.ok()?;
|
||||
|
||||
@@ -432,10 +470,24 @@ impl IcmpProxy {
|
||||
tracing::info!("icmp nat table entry replaced: {:?}", old);
|
||||
}
|
||||
|
||||
if let Err(e) = self.send_icmp_packet(ipv4.get_destination(), &icmp_packet) {
|
||||
if let Err(e) = self.send_icmp_packet(real_dst_ip, &icmp_packet) {
|
||||
tracing::error!("send icmp packet failed: {:?}", e);
|
||||
}
|
||||
|
||||
Some(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for IcmpProxy {
|
||||
fn drop(&mut self) {
|
||||
tracing::info!(
|
||||
"dropping icmp proxy, {:?}",
|
||||
self.socket.lock().unwrap().as_ref()
|
||||
);
|
||||
self.socket.lock().unwrap().as_ref().and_then(|s| {
|
||||
tracing::info!("shutting down icmp socket");
|
||||
let _ = s.shutdown(std::net::Shutdown::Both);
|
||||
Some(())
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ use pnet::packet::{
|
||||
Packet as _,
|
||||
};
|
||||
use prost::Message;
|
||||
use tokio::{io::copy_bidirectional, task::JoinSet};
|
||||
use tokio::{io::copy_bidirectional, select, task::JoinSet};
|
||||
|
||||
use super::{
|
||||
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
|
||||
@@ -107,7 +107,7 @@ async fn handle_kcp_output(
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct NatDstKcpConnector {
|
||||
pub(crate) kcp_endpoint: Arc<KcpEndpoint>,
|
||||
pub(crate) peer_mgr: Arc<PeerManager>,
|
||||
pub(crate) peer_mgr: Weak<PeerManager>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -120,35 +120,68 @@ impl NatDstConnector for NatDstKcpConnector {
|
||||
dst: Some(nat_dst.into()),
|
||||
};
|
||||
|
||||
let (dst_peers, _) = match nat_dst {
|
||||
SocketAddr::V4(addr) => {
|
||||
let ip = addr.ip();
|
||||
self.peer_mgr.get_msg_dst_peer(&ip).await
|
||||
}
|
||||
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
|
||||
return Err(anyhow::anyhow!("peer manager is not available").into());
|
||||
};
|
||||
|
||||
let dst_peer_id = match nat_dst {
|
||||
SocketAddr::V4(addr) => peer_mgr.get_peer_map().get_peer_id_by_ipv4(addr.ip()).await,
|
||||
SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()),
|
||||
};
|
||||
|
||||
tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peers);
|
||||
let Some(dst_peer) = dst_peer_id else {
|
||||
return Err(anyhow::anyhow!("no peer found for nat dst: {}", nat_dst).into());
|
||||
};
|
||||
|
||||
if dst_peers.len() != 1 {
|
||||
return Err(anyhow::anyhow!("no dst peer found for nat dst: {}", nat_dst).into());
|
||||
tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peer);
|
||||
|
||||
let mut connect_tasks: JoinSet<std::result::Result<ConnId, anyhow::Error>> = JoinSet::new();
|
||||
let mut retry_remain = 5;
|
||||
loop {
|
||||
select! {
|
||||
Some(Ok(Ok(ret))) = connect_tasks.join_next() => {
|
||||
// just wait for the previous connection to finish
|
||||
let stream = KcpStream::new(&self.kcp_endpoint, ret)
|
||||
.ok_or(anyhow::anyhow!("failed to create kcp stream"))?;
|
||||
return Ok(stream);
|
||||
}
|
||||
_ = tokio::time::sleep(Duration::from_millis(200)), if !connect_tasks.is_empty() && retry_remain > 0 => {
|
||||
// no successful connection yet, trigger another connection attempt
|
||||
}
|
||||
else => {
|
||||
// got error in connect_tasks, continue to retry
|
||||
if retry_remain == 0 && connect_tasks.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create a new connection task
|
||||
if retry_remain == 0 {
|
||||
continue;
|
||||
}
|
||||
retry_remain -= 1;
|
||||
|
||||
let kcp_endpoint = self.kcp_endpoint.clone();
|
||||
let my_peer_id = peer_mgr.my_peer_id();
|
||||
let conn_data_clone = conn_data.clone();
|
||||
|
||||
connect_tasks.spawn(async move {
|
||||
kcp_endpoint
|
||||
.connect(
|
||||
Duration::from_secs(10),
|
||||
my_peer_id,
|
||||
dst_peer,
|
||||
Bytes::from(conn_data_clone.encode_to_vec()),
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to connect to nat dst: {}", nat_dst.to_string())
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
let ret = self
|
||||
.kcp_endpoint
|
||||
.connect(
|
||||
Duration::from_secs(10),
|
||||
self.peer_mgr.my_peer_id(),
|
||||
dst_peers[0],
|
||||
Bytes::from(conn_data.encode_to_vec()),
|
||||
)
|
||||
.await
|
||||
.with_context(|| format!("failed to connect to nat dst: {}", nat_dst.to_string()))?;
|
||||
|
||||
let stream = KcpStream::new(&self.kcp_endpoint, ret)
|
||||
.ok_or(anyhow::anyhow!("failed to create kcp stream"))?;
|
||||
|
||||
Ok(stream)
|
||||
Err(anyhow::anyhow!("failed to connect to nat dst: {}", nat_dst).into())
|
||||
}
|
||||
|
||||
fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
|
||||
@@ -161,8 +194,9 @@ impl NatDstConnector for NatDstKcpConnector {
|
||||
_global_ctx: &GlobalCtx,
|
||||
hdr: &PeerManagerHeader,
|
||||
_ipv4: &Ipv4Packet,
|
||||
_real_dst_ip: &mut Ipv4Addr,
|
||||
) -> bool {
|
||||
return hdr.from_peer_id == hdr.to_peer_id;
|
||||
return hdr.from_peer_id == hdr.to_peer_id && hdr.is_kcp_src_modified();
|
||||
}
|
||||
|
||||
fn transport_type(&self) -> TcpProxyEntryTransportType {
|
||||
@@ -173,32 +207,41 @@ impl NatDstConnector for NatDstKcpConnector {
|
||||
#[derive(Clone)]
|
||||
struct TcpProxyForKcpSrc(Arc<TcpProxy<NatDstKcpConnector>>);
|
||||
|
||||
pub struct KcpProxySrc {
|
||||
kcp_endpoint: Arc<KcpEndpoint>,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
|
||||
tcp_proxy: TcpProxyForKcpSrc,
|
||||
tasks: JoinSet<()>,
|
||||
#[async_trait::async_trait]
|
||||
pub(crate) trait TcpProxyForKcpSrcTrait: Send + Sync + 'static {
|
||||
type Connector: NatDstConnector;
|
||||
fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>>;
|
||||
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool;
|
||||
}
|
||||
|
||||
impl TcpProxyForKcpSrc {
|
||||
#[async_trait::async_trait]
|
||||
impl TcpProxyForKcpSrcTrait for TcpProxyForKcpSrc {
|
||||
type Connector = NatDstKcpConnector;
|
||||
|
||||
fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>> {
|
||||
&self.0
|
||||
}
|
||||
|
||||
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
|
||||
let peer_map: Arc<crate::peers::peer_map::PeerMap> =
|
||||
self.0.get_peer_manager().get_peer_map();
|
||||
let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
|
||||
return false;
|
||||
};
|
||||
let Some(feature_flag) = peer_map.get_peer_feature_flag(dst_peer_id).await else {
|
||||
let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else {
|
||||
return false;
|
||||
};
|
||||
feature_flag.kcp_input
|
||||
peer_info.feature_flag.map(|x| x.kcp_input).unwrap_or(false)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl NicPacketFilter for TcpProxyForKcpSrc {
|
||||
impl<C: NatDstConnector, T: TcpProxyForKcpSrcTrait<Connector = C>> NicPacketFilter for T {
|
||||
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
|
||||
let ret = self.0.try_process_packet_from_nic(zc_packet).await;
|
||||
let ret = self
|
||||
.get_tcp_proxy()
|
||||
.try_process_packet_from_nic(zc_packet)
|
||||
.await;
|
||||
if ret {
|
||||
return true;
|
||||
}
|
||||
@@ -225,29 +268,45 @@ impl NicPacketFilter for TcpProxyForKcpSrc {
|
||||
}
|
||||
} else {
|
||||
// if not syn packet, only allow established connection
|
||||
if !self.0.is_tcp_proxy_connection(SocketAddr::new(
|
||||
IpAddr::V4(ip_packet.get_source()),
|
||||
tcp_packet.get_source(),
|
||||
)) {
|
||||
if !self
|
||||
.get_tcp_proxy()
|
||||
.is_tcp_proxy_connection(SocketAddr::new(
|
||||
IpAddr::V4(ip_packet.get_source()),
|
||||
tcp_packet.get_source(),
|
||||
))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(my_ipv4) = self.0.get_global_ctx().get_ipv4() {
|
||||
if let Some(my_ipv4) = self.get_tcp_proxy().get_global_ctx().get_ipv4() {
|
||||
// this is a net-to-net packet, only allow it when smoltcp is enabled
|
||||
// because the syn-ack packet will not be through and handled by the tun device when
|
||||
// the source ip is in the local network
|
||||
if ip_packet.get_source() != my_ipv4.address() && !self.0.is_smoltcp_enabled() {
|
||||
if ip_packet.get_source() != my_ipv4.address()
|
||||
&& !self.get_tcp_proxy().is_smoltcp_enabled()
|
||||
{
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.0.get_my_peer_id().into();
|
||||
|
||||
let hdr = zc_packet.mut_peer_manager_header().unwrap();
|
||||
hdr.to_peer_id = self.get_tcp_proxy().get_my_peer_id().into();
|
||||
if self.get_tcp_proxy().get_transport_type() == TcpProxyEntryTransportType::Kcp {
|
||||
hdr.set_kcp_src_modified(true);
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
pub struct KcpProxySrc {
|
||||
kcp_endpoint: Arc<KcpEndpoint>,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
|
||||
tcp_proxy: TcpProxyForKcpSrc,
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
|
||||
impl KcpProxySrc {
|
||||
pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
|
||||
let mut kcp_endpoint = create_kcp_endpoint();
|
||||
@@ -268,7 +327,7 @@ impl KcpProxySrc {
|
||||
peer_manager.clone(),
|
||||
NatDstKcpConnector {
|
||||
kcp_endpoint: kcp_endpoint.clone(),
|
||||
peer_mgr: peer_manager.clone(),
|
||||
peer_mgr: Arc::downgrade(&peer_manager),
|
||||
},
|
||||
);
|
||||
|
||||
@@ -309,6 +368,7 @@ pub struct KcpProxyDst {
|
||||
kcp_endpoint: Arc<KcpEndpoint>,
|
||||
peer_manager: Arc<PeerManager>,
|
||||
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
|
||||
cidr_set: Arc<CidrSet>,
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
|
||||
@@ -324,11 +384,12 @@ impl KcpProxyDst {
|
||||
output_receiver,
|
||||
false,
|
||||
));
|
||||
|
||||
let cidr_set = CidrSet::new(peer_manager.get_global_ctx());
|
||||
Self {
|
||||
kcp_endpoint: Arc::new(kcp_endpoint),
|
||||
peer_manager,
|
||||
proxy_entries: Arc::new(DashMap::new()),
|
||||
cidr_set: Arc::new(cidr_set),
|
||||
tasks,
|
||||
}
|
||||
}
|
||||
@@ -338,6 +399,7 @@ impl KcpProxyDst {
|
||||
mut kcp_stream: KcpStream,
|
||||
global_ctx: ArcGlobalCtx,
|
||||
proxy_entries: Arc<DashMap<ConnId, TcpProxyEntry>>,
|
||||
cidr_set: Arc<CidrSet>,
|
||||
) -> Result<()> {
|
||||
let mut conn_data = kcp_stream.conn_data().clone();
|
||||
let parsed_conn_data = KcpConnData::decode(&mut conn_data)
|
||||
@@ -350,6 +412,16 @@ impl KcpProxyDst {
|
||||
))?
|
||||
.into();
|
||||
|
||||
match dst_socket.ip() {
|
||||
IpAddr::V4(dst_v4_ip) => {
|
||||
let mut real_ip = dst_v4_ip;
|
||||
if cidr_set.contains_v4(dst_v4_ip, &mut real_ip) {
|
||||
dst_socket.set_ip(real_ip.into());
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
};
|
||||
|
||||
let conn_id = kcp_stream.conn_id();
|
||||
proxy_entries.insert(
|
||||
conn_id,
|
||||
@@ -391,6 +463,7 @@ impl KcpProxyDst {
|
||||
let kcp_endpoint = self.kcp_endpoint.clone();
|
||||
let global_ctx = self.peer_manager.get_global_ctx().clone();
|
||||
let proxy_entries = self.proxy_entries.clone();
|
||||
let cidr_set = self.cidr_set.clone();
|
||||
self.tasks.spawn(async move {
|
||||
while let Ok(conn) = kcp_endpoint.accept().await {
|
||||
let stream = KcpStream::new(&kcp_endpoint, conn)
|
||||
@@ -399,8 +472,10 @@ impl KcpProxyDst {
|
||||
|
||||
let global_ctx = global_ctx.clone();
|
||||
let proxy_entries = proxy_entries.clone();
|
||||
let cidr_set = cidr_set.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries).await;
|
||||
let _ = Self::handle_one_in_stream(stream, global_ctx, proxy_entries, cidr_set)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use dashmap::DashMap;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
@@ -17,11 +18,15 @@ pub mod socks5;
|
||||
|
||||
pub mod kcp_proxy;
|
||||
|
||||
pub mod quic_proxy;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct CidrSet {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
cidr_set: Arc<Mutex<Vec<cidr::IpCidr>>>,
|
||||
cidr_set: Arc<Mutex<Vec<cidr::Ipv4Cidr>>>,
|
||||
tasks: JoinSet<()>,
|
||||
|
||||
mapped_to_real: Arc<DashMap<cidr::Ipv4Cidr, cidr::Ipv4Cidr>>,
|
||||
}
|
||||
|
||||
impl CidrSet {
|
||||
@@ -30,6 +35,8 @@ impl CidrSet {
|
||||
global_ctx,
|
||||
cidr_set: Arc::new(Mutex::new(vec![])),
|
||||
tasks: JoinSet::new(),
|
||||
|
||||
mapped_to_real: Arc::new(DashMap::new()),
|
||||
};
|
||||
ret.run_cidr_updater();
|
||||
ret
|
||||
@@ -38,15 +45,23 @@ impl CidrSet {
|
||||
fn run_cidr_updater(&mut self) {
|
||||
let global_ctx = self.global_ctx.clone();
|
||||
let cidr_set = self.cidr_set.clone();
|
||||
let mapped_to_real = self.mapped_to_real.clone();
|
||||
self.tasks.spawn(async move {
|
||||
let mut last_cidrs = vec![];
|
||||
loop {
|
||||
let cidrs = global_ctx.get_proxy_cidrs();
|
||||
let cidrs = global_ctx.config.get_proxy_cidrs();
|
||||
if cidrs != last_cidrs {
|
||||
last_cidrs = cidrs.clone();
|
||||
mapped_to_real.clear();
|
||||
cidr_set.lock().unwrap().clear();
|
||||
for cidr in cidrs.iter() {
|
||||
cidr_set.lock().unwrap().push(cidr.clone());
|
||||
let real_cidr = cidr.cidr;
|
||||
let mapped = cidr.mapped_cidr.unwrap_or(real_cidr.clone());
|
||||
cidr_set.lock().unwrap().push(mapped.clone());
|
||||
|
||||
if mapped != real_cidr {
|
||||
mapped_to_real.insert(mapped.clone(), real_cidr.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
@@ -54,11 +69,23 @@ impl CidrSet {
|
||||
});
|
||||
}
|
||||
|
||||
pub fn contains_v4(&self, ip: std::net::Ipv4Addr) -> bool {
|
||||
let ip = ip.into();
|
||||
pub fn contains_v4(&self, ipv4: std::net::Ipv4Addr, real_ip: &mut std::net::Ipv4Addr) -> bool {
|
||||
let ip = ipv4.into();
|
||||
let s = self.cidr_set.lock().unwrap();
|
||||
for cidr in s.iter() {
|
||||
if cidr.contains(&ip) {
|
||||
if let Some(real_cidr) = self.mapped_to_real.get(&cidr).map(|v| v.value().clone()) {
|
||||
let origin_network_bits = real_cidr.first().address().to_bits();
|
||||
let network_mask = cidr.mask().to_bits();
|
||||
|
||||
let mut converted_ip = ipv4.to_bits();
|
||||
converted_ip &= !network_mask;
|
||||
converted_ip |= origin_network_bits;
|
||||
|
||||
*real_ip = std::net::Ipv4Addr::from(converted_ip);
|
||||
} else {
|
||||
*real_ip = ipv4;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
443
easytier/src/gateway/quic_proxy.rs
Normal file
443
easytier/src/gateway/quic_proxy.rs
Normal file
@@ -0,0 +1,443 @@
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::sync::{Arc, Mutex, Weak};
|
||||
use std::{net::SocketAddr, pin::Pin};
|
||||
|
||||
use anyhow::Context;
|
||||
use dashmap::DashMap;
|
||||
use pnet::packet::ipv4::Ipv4Packet;
|
||||
use prost::Message as _;
|
||||
use quinn::{Endpoint, Incoming};
|
||||
use tokio::io::{copy_bidirectional, AsyncRead, AsyncReadExt, AsyncWrite};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::task::JoinSet;
|
||||
use tokio::time::timeout;
|
||||
|
||||
use crate::common::error::Result;
|
||||
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx};
|
||||
use crate::common::join_joinset_background;
|
||||
use crate::defer;
|
||||
use crate::gateway::kcp_proxy::TcpProxyForKcpSrcTrait;
|
||||
use crate::gateway::tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy};
|
||||
use crate::gateway::CidrSet;
|
||||
use crate::peers::peer_manager::PeerManager;
|
||||
use crate::proto::cli::{
|
||||
ListTcpProxyEntryRequest, ListTcpProxyEntryResponse, TcpProxyEntry, TcpProxyEntryState,
|
||||
TcpProxyEntryTransportType, TcpProxyRpc,
|
||||
};
|
||||
use crate::proto::common::ProxyDstInfo;
|
||||
use crate::proto::rpc_types;
|
||||
use crate::proto::rpc_types::controller::BaseController;
|
||||
use crate::tunnel::packet_def::PeerManagerHeader;
|
||||
use crate::tunnel::quic::{configure_client, make_server_endpoint};
|
||||
|
||||
pub struct QUICStream {
|
||||
endpoint: Option<quinn::Endpoint>,
|
||||
connection: Option<quinn::Connection>,
|
||||
sender: quinn::SendStream,
|
||||
receiver: quinn::RecvStream,
|
||||
}
|
||||
|
||||
impl AsyncRead for QUICStream {
|
||||
fn poll_read(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &mut tokio::io::ReadBuf<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
Pin::new(&mut this.receiver).poll_read(cx, buf)
|
||||
}
|
||||
}
|
||||
|
||||
impl AsyncWrite for QUICStream {
|
||||
fn poll_write(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &[u8],
|
||||
) -> std::task::Poll<std::io::Result<usize>> {
|
||||
let this = self.get_mut();
|
||||
AsyncWrite::poll_write(Pin::new(&mut this.sender), cx, buf)
|
||||
}
|
||||
|
||||
fn poll_flush(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
Pin::new(&mut this.sender).poll_flush(cx)
|
||||
}
|
||||
|
||||
fn poll_shutdown(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
Pin::new(&mut this.sender).poll_shutdown(cx)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct NatDstQUICConnector {
|
||||
pub(crate) peer_mgr: Weak<PeerManager>,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl NatDstConnector for NatDstQUICConnector {
|
||||
type DstStream = QUICStream;
|
||||
|
||||
#[tracing::instrument(skip(self), level = "debug", name = "NatDstQUICConnector::connect")]
|
||||
async fn connect(&self, src: SocketAddr, nat_dst: SocketAddr) -> Result<Self::DstStream> {
|
||||
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
|
||||
return Err(anyhow::anyhow!("peer manager is not available").into());
|
||||
};
|
||||
|
||||
let IpAddr::V4(dst_ipv4) = nat_dst.ip() else {
|
||||
return Err(anyhow::anyhow!("src must be an IPv4 address").into());
|
||||
};
|
||||
|
||||
let Some(dst_peer) = peer_mgr.get_peer_map().get_peer_id_by_ipv4(&dst_ipv4).await else {
|
||||
return Err(anyhow::anyhow!("no peer found for dst: {}", nat_dst).into());
|
||||
};
|
||||
|
||||
let Some(dst_peer_info) = peer_mgr.get_peer_map().get_route_peer_info(dst_peer).await
|
||||
else {
|
||||
return Err(anyhow::anyhow!("no peer info found for dst peer: {}", dst_peer).into());
|
||||
};
|
||||
|
||||
let Some(dst_ipv4): Option<Ipv4Addr> = dst_peer_info.ipv4_addr.map(Into::into) else {
|
||||
return Err(anyhow::anyhow!("no ipv4 found for dst peer: {}", dst_peer).into());
|
||||
};
|
||||
|
||||
let Some(quic_port) = dst_peer_info.quic_port else {
|
||||
return Err(anyhow::anyhow!("no quic port found for dst peer: {}", dst_peer).into());
|
||||
};
|
||||
|
||||
let mut endpoint = Endpoint::client("0.0.0.0:0".parse().unwrap())
|
||||
.with_context(|| format!("failed to create QUIC endpoint for src: {}", src))?;
|
||||
endpoint.set_default_client_config(configure_client());
|
||||
|
||||
// connect to server
|
||||
let connection = {
|
||||
let _g = peer_mgr.get_global_ctx().net_ns.guard();
|
||||
endpoint
|
||||
.connect(
|
||||
SocketAddr::new(dst_ipv4.into(), quic_port as u16),
|
||||
"localhost",
|
||||
)
|
||||
.unwrap()
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"failed to connect to NAT destination {} from {}, real dst: {}",
|
||||
nat_dst, src, dst_ipv4
|
||||
)
|
||||
})?
|
||||
};
|
||||
|
||||
let (mut w, r) = connection
|
||||
.open_bi()
|
||||
.await
|
||||
.with_context(|| "open_bi failed")?;
|
||||
|
||||
let proxy_dst_info = ProxyDstInfo {
|
||||
dst_addr: Some(nat_dst.into()),
|
||||
};
|
||||
let proxy_dst_info_buf = proxy_dst_info.encode_to_vec();
|
||||
let buf_len = proxy_dst_info_buf.len() as u8;
|
||||
w.write(&buf_len.to_le_bytes())
|
||||
.await
|
||||
.with_context(|| "failed to write proxy dst info buf len to QUIC stream")?;
|
||||
w.write(&proxy_dst_info_buf)
|
||||
.await
|
||||
.with_context(|| "failed to write proxy dst info to QUIC stream")?;
|
||||
|
||||
Ok(QUICStream {
|
||||
endpoint: Some(endpoint),
|
||||
connection: Some(connection),
|
||||
sender: w,
|
||||
receiver: r,
|
||||
})
|
||||
}
|
||||
|
||||
fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn check_packet_from_peer(
|
||||
&self,
|
||||
_cidr_set: &CidrSet,
|
||||
_global_ctx: &GlobalCtx,
|
||||
hdr: &PeerManagerHeader,
|
||||
_ipv4: &Ipv4Packet,
|
||||
_real_dst_ip: &mut Ipv4Addr,
|
||||
) -> bool {
|
||||
return hdr.from_peer_id == hdr.to_peer_id && !hdr.is_kcp_src_modified();
|
||||
}
|
||||
|
||||
fn transport_type(&self) -> TcpProxyEntryTransportType {
|
||||
TcpProxyEntryTransportType::Quic
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct TcpProxyForQUICSrc(Arc<TcpProxy<NatDstQUICConnector>>);
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TcpProxyForKcpSrcTrait for TcpProxyForQUICSrc {
|
||||
type Connector = NatDstQUICConnector;
|
||||
|
||||
fn get_tcp_proxy(&self) -> &Arc<TcpProxy<Self::Connector>> {
|
||||
&self.0
|
||||
}
|
||||
|
||||
async fn check_dst_allow_kcp_input(&self, dst_ip: &Ipv4Addr) -> bool {
|
||||
let peer_map: Arc<crate::peers::peer_map::PeerMap> =
|
||||
self.0.get_peer_manager().get_peer_map();
|
||||
let Some(dst_peer_id) = peer_map.get_peer_id_by_ipv4(dst_ip).await else {
|
||||
return false;
|
||||
};
|
||||
let Some(peer_info) = peer_map.get_route_peer_info(dst_peer_id).await else {
|
||||
return false;
|
||||
};
|
||||
let Some(quic_port) = peer_info.quic_port else {
|
||||
return false;
|
||||
};
|
||||
quic_port > 0
|
||||
}
|
||||
}
|
||||
|
||||
pub struct QUICProxySrc {
|
||||
peer_manager: Arc<PeerManager>,
|
||||
tcp_proxy: TcpProxyForQUICSrc,
|
||||
}
|
||||
|
||||
impl QUICProxySrc {
|
||||
pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
|
||||
let tcp_proxy = TcpProxy::new(
|
||||
peer_manager.clone(),
|
||||
NatDstQUICConnector {
|
||||
peer_mgr: Arc::downgrade(&peer_manager),
|
||||
},
|
||||
);
|
||||
|
||||
Self {
|
||||
peer_manager,
|
||||
tcp_proxy: TcpProxyForQUICSrc(tcp_proxy),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(&self) {
|
||||
self.peer_manager
|
||||
.add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone()))
|
||||
.await;
|
||||
self.peer_manager
|
||||
.add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone()))
|
||||
.await;
|
||||
self.tcp_proxy.0.start(false).await.unwrap();
|
||||
}
|
||||
|
||||
pub fn get_tcp_proxy(&self) -> Arc<TcpProxy<NatDstQUICConnector>> {
|
||||
self.tcp_proxy.0.clone()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct QUICProxyDst {
|
||||
global_ctx: Arc<GlobalCtx>,
|
||||
endpoint: Arc<quinn::Endpoint>,
|
||||
proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
|
||||
tasks: Arc<Mutex<JoinSet<()>>>,
|
||||
}
|
||||
|
||||
impl QUICProxyDst {
|
||||
pub fn new(global_ctx: ArcGlobalCtx) -> Result<Self> {
|
||||
let _g = global_ctx.net_ns.guard();
|
||||
let (endpoint, _) = make_server_endpoint("0.0.0.0:0".parse().unwrap())
|
||||
.map_err(|e| anyhow::anyhow!("failed to create QUIC endpoint: {}", e))?;
|
||||
let tasks = Arc::new(Mutex::new(JoinSet::new()));
|
||||
join_joinset_background(tasks.clone(), "QUICProxyDst tasks".to_string());
|
||||
Ok(Self {
|
||||
global_ctx,
|
||||
endpoint: Arc::new(endpoint),
|
||||
proxy_entries: Arc::new(DashMap::new()),
|
||||
tasks,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
let endpoint = self.endpoint.clone();
|
||||
let tasks = Arc::downgrade(&self.tasks.clone());
|
||||
let ctx = self.global_ctx.clone();
|
||||
let cidr_set = Arc::new(CidrSet::new(ctx.clone()));
|
||||
let proxy_entries = self.proxy_entries.clone();
|
||||
|
||||
let task = async move {
|
||||
loop {
|
||||
match endpoint.accept().await {
|
||||
Some(conn) => {
|
||||
let Some(tasks) = tasks.upgrade() else {
|
||||
tracing::warn!(
|
||||
"QUICProxyDst tasks is not available, stopping accept loop"
|
||||
);
|
||||
return;
|
||||
};
|
||||
tasks
|
||||
.lock()
|
||||
.unwrap()
|
||||
.spawn(Self::handle_connection_with_timeout(
|
||||
conn,
|
||||
ctx.clone(),
|
||||
cidr_set.clone(),
|
||||
proxy_entries.clone(),
|
||||
));
|
||||
}
|
||||
None => {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
self.tasks.lock().unwrap().spawn(task);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn local_addr(&self) -> Result<SocketAddr> {
|
||||
self.endpoint.local_addr().map_err(Into::into)
|
||||
}
|
||||
|
||||
async fn handle_connection_with_timeout(
|
||||
conn: Incoming,
|
||||
ctx: Arc<GlobalCtx>,
|
||||
cidr_set: Arc<CidrSet>,
|
||||
proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
|
||||
) {
|
||||
let remote_addr = conn.remote_address();
|
||||
defer!(
|
||||
proxy_entries.remove(&remote_addr);
|
||||
);
|
||||
let ret = timeout(
|
||||
std::time::Duration::from_secs(10),
|
||||
Self::handle_connection(conn, ctx, cidr_set, remote_addr, proxy_entries.clone()),
|
||||
)
|
||||
.await;
|
||||
|
||||
match ret {
|
||||
Ok(Ok((mut quic_stream, mut tcp_stream))) => {
|
||||
let ret = copy_bidirectional(&mut quic_stream, &mut tcp_stream).await;
|
||||
tracing::info!(
|
||||
"QUIC connection handled, result: {:?}, remote addr: {:?}",
|
||||
ret,
|
||||
quic_stream.connection.as_ref().map(|c| c.remote_address())
|
||||
);
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
tracing::error!("Failed to handle QUIC connection: {}", e);
|
||||
}
|
||||
Err(_) => {
|
||||
tracing::warn!("Timeout while handling QUIC connection");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_connection(
|
||||
incoming: Incoming,
|
||||
ctx: ArcGlobalCtx,
|
||||
cidr_set: Arc<CidrSet>,
|
||||
proxy_entry_key: SocketAddr,
|
||||
proxy_entries: Arc<DashMap<SocketAddr, TcpProxyEntry>>,
|
||||
) -> Result<(QUICStream, TcpStream)> {
|
||||
let conn = incoming.await.with_context(|| "accept failed")?;
|
||||
let addr = conn.remote_address();
|
||||
tracing::info!("Accepted QUIC connection from {}", addr);
|
||||
let (w, mut r) = conn.accept_bi().await.with_context(|| "accept_bi failed")?;
|
||||
let len = r
|
||||
.read_u8()
|
||||
.await
|
||||
.with_context(|| "failed to read proxy dst info buf len")?;
|
||||
let mut buf = vec![0u8; len as usize];
|
||||
r.read_exact(&mut buf)
|
||||
.await
|
||||
.with_context(|| "failed to read proxy dst info")?;
|
||||
|
||||
let proxy_dst_info =
|
||||
ProxyDstInfo::decode(&buf[..]).with_context(|| "failed to decode proxy dst info")?;
|
||||
|
||||
let dst_socket: SocketAddr = proxy_dst_info
|
||||
.dst_addr
|
||||
.map(Into::into)
|
||||
.ok_or_else(|| anyhow::anyhow!("no dst addr in proxy dst info"))?;
|
||||
|
||||
let SocketAddr::V4(mut dst_socket) = dst_socket else {
|
||||
return Err(anyhow::anyhow!("NAT destination must be an IPv4 address").into());
|
||||
};
|
||||
|
||||
let mut real_ip = *dst_socket.ip();
|
||||
if cidr_set.contains_v4(*dst_socket.ip(), &mut real_ip) {
|
||||
dst_socket.set_ip(real_ip);
|
||||
}
|
||||
|
||||
if Some(*dst_socket.ip()) == ctx.get_ipv4().map(|ip| ip.address()) && ctx.no_tun() {
|
||||
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
|
||||
}
|
||||
|
||||
proxy_entries.insert(
|
||||
proxy_entry_key,
|
||||
TcpProxyEntry {
|
||||
src: Some(addr.into()),
|
||||
dst: Some(SocketAddr::V4(dst_socket).into()),
|
||||
start_time: chrono::Local::now().timestamp() as u64,
|
||||
state: TcpProxyEntryState::ConnectingDst.into(),
|
||||
transport_type: TcpProxyEntryTransportType::Quic.into(),
|
||||
},
|
||||
);
|
||||
|
||||
let connector = NatDstTcpConnector {};
|
||||
|
||||
let dst_stream = {
|
||||
let _g = ctx.net_ns.guard();
|
||||
connector
|
||||
.connect("0.0.0.0:0".parse().unwrap(), dst_socket.into())
|
||||
.await?
|
||||
};
|
||||
|
||||
if let Some(mut e) = proxy_entries.get_mut(&proxy_entry_key) {
|
||||
e.state = TcpProxyEntryState::Connected.into();
|
||||
}
|
||||
|
||||
let quic_stream = QUICStream {
|
||||
endpoint: None,
|
||||
connection: Some(conn),
|
||||
sender: w,
|
||||
receiver: r,
|
||||
};
|
||||
|
||||
Ok((quic_stream, dst_stream))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct QUICProxyDstRpcService(Weak<DashMap<SocketAddr, TcpProxyEntry>>);
|
||||
|
||||
impl QUICProxyDstRpcService {
|
||||
pub fn new(quic_proxy_dst: &QUICProxyDst) -> Self {
|
||||
Self(Arc::downgrade(&quic_proxy_dst.proxy_entries))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl TcpProxyRpc for QUICProxyDstRpcService {
|
||||
type Controller = BaseController;
|
||||
async fn list_tcp_proxy_entry(
|
||||
&self,
|
||||
_: BaseController,
|
||||
_request: ListTcpProxyEntryRequest, // Accept request of type HelloRequest
|
||||
) -> std::result::Result<ListTcpProxyEntryResponse, rpc_types::error::Error> {
|
||||
let mut reply = ListTcpProxyEntryResponse::default();
|
||||
if let Some(tcp_proxy) = self.0.upgrade() {
|
||||
for item in tcp_proxy.iter() {
|
||||
reply.entries.push(item.value().clone());
|
||||
}
|
||||
}
|
||||
Ok(reply)
|
||||
}
|
||||
}
|
||||
@@ -237,12 +237,9 @@ impl AsyncTcpConnector for Socks5KcpConnector {
|
||||
let Some(kcp_endpoint) = self.kcp_endpoint.upgrade() else {
|
||||
return Err(anyhow::anyhow!("kcp endpoint is not ready").into());
|
||||
};
|
||||
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
|
||||
return Err(anyhow::anyhow!("peer mgr is not ready").into());
|
||||
};
|
||||
let c = NatDstKcpConnector {
|
||||
kcp_endpoint,
|
||||
peer_mgr,
|
||||
peer_mgr: self.peer_mgr.clone(),
|
||||
};
|
||||
println!("connect to kcp endpoint, addr = {:?}", addr);
|
||||
let ret = c
|
||||
|
||||
@@ -52,6 +52,7 @@ pub(crate) trait NatDstConnector: Send + Sync + Clone + 'static {
|
||||
global_ctx: &GlobalCtx,
|
||||
hdr: &PeerManagerHeader,
|
||||
ipv4: &Ipv4Packet,
|
||||
real_dst_ip: &mut Ipv4Addr,
|
||||
) -> bool;
|
||||
fn transport_type(&self) -> TcpProxyEntryTransportType;
|
||||
}
|
||||
@@ -99,10 +100,11 @@ impl NatDstConnector for NatDstTcpConnector {
|
||||
global_ctx: &GlobalCtx,
|
||||
hdr: &PeerManagerHeader,
|
||||
ipv4: &Ipv4Packet,
|
||||
real_dst_ip: &mut Ipv4Addr,
|
||||
) -> bool {
|
||||
let is_exit_node = hdr.is_exit_node();
|
||||
|
||||
if !cidr_set.contains_v4(ipv4.get_destination())
|
||||
if !cidr_set.contains_v4(ipv4.get_destination(), real_dst_ip)
|
||||
&& !is_exit_node
|
||||
&& !(global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination())
|
||||
@@ -125,7 +127,8 @@ type NatDstEntryState = TcpProxyEntryState;
|
||||
pub struct NatDstEntry {
|
||||
id: uuid::Uuid,
|
||||
src: SocketAddr,
|
||||
dst: SocketAddr,
|
||||
real_dst: SocketAddr,
|
||||
mapped_dst: SocketAddr,
|
||||
start_time: Instant,
|
||||
start_time_local: chrono::DateTime<chrono::Local>,
|
||||
tasks: Mutex<JoinSet<()>>,
|
||||
@@ -133,11 +136,12 @@ pub struct NatDstEntry {
|
||||
}
|
||||
|
||||
impl NatDstEntry {
|
||||
pub fn new(src: SocketAddr, dst: SocketAddr) -> Self {
|
||||
pub fn new(src: SocketAddr, real_dst: SocketAddr, mapped_dst: SocketAddr) -> Self {
|
||||
Self {
|
||||
id: uuid::Uuid::new_v4(),
|
||||
src,
|
||||
dst,
|
||||
real_dst,
|
||||
mapped_dst,
|
||||
start_time: Instant::now(),
|
||||
start_time_local: chrono::Local::now(),
|
||||
tasks: Mutex::new(JoinSet::new()),
|
||||
@@ -148,7 +152,7 @@ impl NatDstEntry {
|
||||
fn into_pb(&self, transport_type: TcpProxyEntryTransportType) -> TcpProxyEntry {
|
||||
TcpProxyEntry {
|
||||
src: Some(self.src.clone().into()),
|
||||
dst: Some(self.dst.clone().into()),
|
||||
dst: Some(self.real_dst.clone().into()),
|
||||
start_time: self.start_time_local.timestamp() as u64,
|
||||
state: self.state.load().into(),
|
||||
transport_type: transport_type.into(),
|
||||
@@ -396,7 +400,7 @@ impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
|
||||
drop(entry);
|
||||
assert_eq!(nat_entry.src, dst_addr);
|
||||
|
||||
let IpAddr::V4(ip) = nat_entry.dst.ip() else {
|
||||
let IpAddr::V4(ip) = nat_entry.mapped_dst.ip() else {
|
||||
panic!("v4 nat entry src ip is not v4");
|
||||
};
|
||||
|
||||
@@ -416,7 +420,7 @@ impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
|
||||
let dst = ip_packet.get_destination();
|
||||
|
||||
let mut tcp_packet = MutableTcpPacket::new(ip_packet.payload_mut()).unwrap();
|
||||
tcp_packet.set_source(nat_entry.dst.port());
|
||||
tcp_packet.set_source(nat_entry.real_dst.port());
|
||||
|
||||
Self::update_tcp_packet_checksum(&mut tcp_packet, &ip, &dst);
|
||||
drop(tcp_packet);
|
||||
@@ -537,7 +541,6 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
}
|
||||
}
|
||||
tracing::error!("smoltcp stack sink exited");
|
||||
panic!("smoltcp stack sink exited");
|
||||
});
|
||||
|
||||
let peer_mgr = self.peer_manager.clone();
|
||||
@@ -559,7 +562,6 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
}
|
||||
}
|
||||
tracing::error!("smoltcp stack stream exited");
|
||||
panic!("smoltcp stack stream exited");
|
||||
});
|
||||
|
||||
let interface_config = smoltcp::iface::Config::new(smoltcp::wire::HardwareAddress::Ip);
|
||||
@@ -607,7 +609,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
let mut tcp_listener = self.get_proxy_listener().await?;
|
||||
|
||||
let global_ctx = self.global_ctx.clone();
|
||||
let tasks = self.tasks.clone();
|
||||
let tasks = Arc::downgrade(&self.tasks);
|
||||
let syn_map = self.syn_map.clone();
|
||||
let conn_map = self.conn_map.clone();
|
||||
let addr_conn_map = self.addr_conn_map.clone();
|
||||
@@ -644,7 +646,7 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
tracing::info!(
|
||||
?socket_addr,
|
||||
"tcp connection accepted for proxy, nat dst: {:?}",
|
||||
entry.dst
|
||||
entry.real_dst
|
||||
);
|
||||
assert_eq!(entry.state.load(), NatDstEntryState::SynReceived);
|
||||
|
||||
@@ -658,6 +660,11 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
let old_nat_val = conn_map.insert(entry_clone.id, entry_clone.clone());
|
||||
assert!(old_nat_val.is_none());
|
||||
|
||||
let Some(tasks) = tasks.upgrade() else {
|
||||
tracing::error!("tcp proxy tasks is dropped, exit accept loop");
|
||||
break;
|
||||
};
|
||||
|
||||
tasks.lock().unwrap().spawn(Self::connect_to_nat_dst(
|
||||
connector.clone(),
|
||||
global_ctx.clone(),
|
||||
@@ -697,14 +704,14 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
|
||||
}
|
||||
|
||||
let nat_dst = if Some(nat_entry.dst.ip())
|
||||
let nat_dst = if Some(nat_entry.real_dst.ip())
|
||||
== global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
|
||||
{
|
||||
format!("127.0.0.1:{}", nat_entry.dst.port())
|
||||
format!("127.0.0.1:{}", nat_entry.real_dst.port())
|
||||
.parse()
|
||||
.unwrap()
|
||||
} else {
|
||||
nat_entry.dst
|
||||
nat_entry.real_dst
|
||||
};
|
||||
|
||||
let _guard = global_ctx.net_ns.guard();
|
||||
@@ -818,10 +825,15 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
return None;
|
||||
}
|
||||
|
||||
if !self
|
||||
.connector
|
||||
.check_packet_from_peer(&self.cidr_set, &self.global_ctx, &hdr, &ipv4)
|
||||
{
|
||||
let mut real_dst_ip = ipv4.get_destination();
|
||||
|
||||
if !self.connector.check_packet_from_peer(
|
||||
&self.cidr_set,
|
||||
&self.global_ctx,
|
||||
&hdr,
|
||||
&ipv4,
|
||||
&mut real_dst_ip,
|
||||
) {
|
||||
return None;
|
||||
}
|
||||
|
||||
@@ -839,12 +851,13 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
if is_tcp_syn && !is_tcp_ack {
|
||||
let dest_ip = ip_packet.get_destination();
|
||||
let dest_port = tcp_packet.get_destination();
|
||||
let dst = SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port));
|
||||
let mapped_dst = SocketAddr::V4(SocketAddrV4::new(dest_ip, dest_port));
|
||||
let real_dst = SocketAddr::V4(SocketAddrV4::new(real_dst_ip, dest_port));
|
||||
|
||||
let old_val = self
|
||||
.syn_map
|
||||
.insert(src, Arc::new(NatDstEntry::new(src, dst)));
|
||||
tracing::info!(src = ?src, dst = ?dst, old_entry = ?old_val, "tcp syn received");
|
||||
.insert(src, Arc::new(NatDstEntry::new(src, real_dst, mapped_dst)));
|
||||
tracing::info!(src = ?src, ?real_dst, ?mapped_dst, old_entry = ?old_val, "tcp syn received");
|
||||
} else if !self.addr_conn_map.contains_key(&src) && !self.syn_map.contains_key(&src) {
|
||||
// if not in syn map and addr conn map, may forwarding n2n packet
|
||||
return None;
|
||||
@@ -889,6 +902,10 @@ impl<C: NatDstConnector> TcpProxy<C> {
|
||||
}
|
||||
entries
|
||||
}
|
||||
|
||||
pub fn get_transport_type(&self) -> TcpProxyEntryTransportType {
|
||||
self.connector.transport_type()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
|
||||
@@ -139,6 +139,8 @@ impl UdpNatEntry {
|
||||
self: Arc<Self>,
|
||||
mut packet_sender: Sender<ZCPacket>,
|
||||
virtual_ipv4: Ipv4Addr,
|
||||
real_ipv4: Ipv4Addr,
|
||||
mapped_ipv4: Ipv4Addr,
|
||||
) {
|
||||
let (s, mut r) = tachyonix::channel(128);
|
||||
|
||||
@@ -197,6 +199,10 @@ impl UdpNatEntry {
|
||||
src_v4.set_ip(virtual_ipv4);
|
||||
}
|
||||
|
||||
if *src_v4.ip() == real_ipv4 {
|
||||
src_v4.set_ip(mapped_ipv4);
|
||||
}
|
||||
|
||||
let Ok(_) = Self::compose_ipv4_packet(
|
||||
&self_clone,
|
||||
&mut packet_sender,
|
||||
@@ -266,7 +272,10 @@ impl UdpProxy {
|
||||
return None;
|
||||
}
|
||||
|
||||
if !self.cidr_set.contains_v4(ipv4.get_destination())
|
||||
let mut real_dst_ip = ipv4.get_destination();
|
||||
if !self
|
||||
.cidr_set
|
||||
.contains_v4(ipv4.get_destination(), &mut real_dst_ip)
|
||||
&& !is_exit_node
|
||||
&& !(self.global_ctx.no_tun()
|
||||
&& Some(ipv4.get_destination())
|
||||
@@ -322,6 +331,8 @@ impl UdpProxy {
|
||||
nat_entry.clone(),
|
||||
self.sender.clone(),
|
||||
self.global_ctx.get_ipv4().map(|x| x.address())?,
|
||||
real_dst_ip,
|
||||
ipv4.get_destination(),
|
||||
)));
|
||||
}
|
||||
|
||||
@@ -335,7 +346,7 @@ impl UdpProxy {
|
||||
.parse()
|
||||
.unwrap()
|
||||
} else {
|
||||
SocketAddr::new(ipv4.get_destination().into(), udp_packet.get_destination())
|
||||
SocketAddr::new(real_dst_ip.into(), udp_packet.get_destination())
|
||||
};
|
||||
|
||||
let send_ret = {
|
||||
|
||||
@@ -298,12 +298,13 @@ impl NicPacketFilter for MagicDnsServerInstanceData {
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RpcServerHook for MagicDnsServerInstanceData {
|
||||
async fn on_new_client(&self, tunnel_info: Option<TunnelInfo>) {
|
||||
println!("New client connected: {:?}", tunnel_info);
|
||||
async fn on_new_client(&self, tunnel_info: Option<TunnelInfo>)-> Result<Option<TunnelInfo>, anyhow::Error> {
|
||||
tracing::info!(?tunnel_info, "New client connected");
|
||||
Ok(tunnel_info)
|
||||
}
|
||||
|
||||
async fn on_client_disconnected(&self, tunnel_info: Option<TunnelInfo>) {
|
||||
println!("Client disconnected: {:?}", tunnel_info);
|
||||
tracing::info!(?tunnel_info, "Client disconnected");
|
||||
let Some(tunnel_info) = tunnel_info else {
|
||||
return;
|
||||
};
|
||||
|
||||
@@ -1,25 +1,26 @@
|
||||
use std::any::Any;
|
||||
use std::collections::HashSet;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use anyhow::Context;
|
||||
use cidr::Ipv4Inet;
|
||||
use cidr::{IpCidr, Ipv4Inet};
|
||||
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::{sync::Mutex, task::JoinSet};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::common::config::ConfigLoader;
|
||||
use crate::common::error::Error;
|
||||
use crate::common::global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent};
|
||||
use crate::common::scoped_task::ScopedTask;
|
||||
use crate::common::PeerId;
|
||||
use crate::connector::direct::DirectConnectorManager;
|
||||
use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager};
|
||||
use crate::connector::udp_hole_punch::UdpHolePunchConnector;
|
||||
use crate::gateway::icmp_proxy::IcmpProxy;
|
||||
use crate::gateway::kcp_proxy::{KcpProxyDst, KcpProxyDstRpcService, KcpProxySrc};
|
||||
use crate::gateway::quic_proxy::{QUICProxyDst, QUICProxyDstRpcService, QUICProxySrc};
|
||||
use crate::gateway::tcp_proxy::{NatDstTcpConnector, TcpProxy, TcpProxyRpcService};
|
||||
use crate::gateway::udp_proxy::UdpProxy;
|
||||
use crate::peer_center::instance::PeerCenterInstance;
|
||||
@@ -29,8 +30,9 @@ use crate::peers::rpc_service::PeerManagerRpcService;
|
||||
use crate::peers::{create_packet_recv_chan, recv_packet_from_chan, PacketRecvChanReceiver};
|
||||
use crate::proto::cli::VpnPortalRpc;
|
||||
use crate::proto::cli::{GetVpnPortalInfoRequest, GetVpnPortalInfoResponse, VpnPortalInfo};
|
||||
use crate::proto::common::TunnelInfo;
|
||||
use crate::proto::peer_rpc::PeerCenterRpcServer;
|
||||
use crate::proto::rpc_impl::standalone::StandAloneServer;
|
||||
use crate::proto::rpc_impl::standalone::{RpcServerHook, StandAloneServer};
|
||||
use crate::proto::rpc_types;
|
||||
use crate::proto::rpc_types::controller::BaseController;
|
||||
use crate::tunnel::tcp::TcpTunnelListener;
|
||||
@@ -69,8 +71,7 @@ impl IpProxy {
|
||||
}
|
||||
|
||||
async fn start(&self) -> Result<(), Error> {
|
||||
if (self.global_ctx.get_proxy_cidrs().is_empty()
|
||||
|| self.global_ctx.proxy_forward_by_system()
|
||||
if (self.global_ctx.config.get_proxy_cidrs().is_empty()
|
||||
|| self.started.load(Ordering::Relaxed))
|
||||
&& !self.global_ctx.enable_exit_node()
|
||||
&& !self.global_ctx.no_tun()
|
||||
@@ -78,6 +79,12 @@ impl IpProxy {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Actually, if this node is enabled as an exit node,
|
||||
// we still can use the system stack to forward packets.
|
||||
if self.global_ctx.proxy_forward_by_system() && !self.global_ctx.no_tun() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
self.started.store(true, Ordering::Relaxed);
|
||||
self.tcp_proxy.start(true).await?;
|
||||
if let Err(e) = self.icmp_proxy.start().await {
|
||||
@@ -112,7 +119,7 @@ impl NicCtx {
|
||||
}
|
||||
|
||||
struct MagicDnsContainer {
|
||||
dns_runner_task: JoinHandle<()>,
|
||||
dns_runner_task: ScopedTask<()>,
|
||||
dns_runner_cancel_token: CancellationToken,
|
||||
}
|
||||
|
||||
@@ -133,7 +140,7 @@ impl NicCtxContainer {
|
||||
Self {
|
||||
nic_ctx: Some(Box::new(nic_ctx)),
|
||||
magic_dns: Some(MagicDnsContainer {
|
||||
dns_runner_task: task,
|
||||
dns_runner_task: task.into(),
|
||||
dns_runner_cancel_token: token,
|
||||
}),
|
||||
}
|
||||
@@ -155,6 +162,58 @@ impl NicCtxContainer {
|
||||
|
||||
type ArcNicCtx = Arc<Mutex<Option<NicCtxContainer>>>;
|
||||
|
||||
pub struct InstanceRpcServerHook {
|
||||
rpc_portal_whitelist: Vec<IpCidr>,
|
||||
}
|
||||
|
||||
impl InstanceRpcServerHook {
|
||||
pub fn new(rpc_portal_whitelist: Option<Vec<IpCidr>>) -> Self {
|
||||
let rpc_portal_whitelist = rpc_portal_whitelist
|
||||
.unwrap_or_else(|| vec!["127.0.0.0/8".parse().unwrap(), "::1/128".parse().unwrap()]);
|
||||
InstanceRpcServerHook {
|
||||
rpc_portal_whitelist,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RpcServerHook for InstanceRpcServerHook {
|
||||
async fn on_new_client(
|
||||
&self,
|
||||
tunnel_info: Option<TunnelInfo>,
|
||||
) -> Result<Option<TunnelInfo>, anyhow::Error> {
|
||||
let tunnel_info = tunnel_info.ok_or_else(|| anyhow::anyhow!("tunnel info is None"))?;
|
||||
|
||||
let remote_url = tunnel_info
|
||||
.remote_addr
|
||||
.clone()
|
||||
.ok_or_else(|| anyhow::anyhow!("remote_addr is None"))?;
|
||||
|
||||
let url_str = &remote_url.url;
|
||||
let url = url::Url::parse(url_str)
|
||||
.map_err(|e| anyhow::anyhow!("Failed to parse remote URL '{}': {}", url_str, e))?;
|
||||
|
||||
let host = url
|
||||
.host_str()
|
||||
.ok_or_else(|| anyhow::anyhow!("No host found in remote URL '{}'", url_str))?;
|
||||
|
||||
let ip_addr: IpAddr = host
|
||||
.parse()
|
||||
.map_err(|e| anyhow::anyhow!("Failed to parse IP address '{}': {}", host, e))?;
|
||||
|
||||
for cidr in &self.rpc_portal_whitelist {
|
||||
if cidr.contains(&ip_addr) {
|
||||
return Ok(Some(tunnel_info));
|
||||
}
|
||||
}
|
||||
return Err(anyhow::anyhow!(
|
||||
"Rpc portal client IP {} not in whitelist: {:?}, ignoring client.",
|
||||
ip_addr,
|
||||
self.rpc_portal_whitelist
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Instance {
|
||||
inst_name: String,
|
||||
|
||||
@@ -174,6 +233,9 @@ pub struct Instance {
|
||||
kcp_proxy_src: Option<KcpProxySrc>,
|
||||
kcp_proxy_dst: Option<KcpProxyDst>,
|
||||
|
||||
quic_proxy_src: Option<QUICProxySrc>,
|
||||
quic_proxy_dst: Option<QUICProxyDst>,
|
||||
|
||||
peer_center: Arc<PeerCenterInstance>,
|
||||
|
||||
vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>,
|
||||
@@ -254,6 +316,9 @@ impl Instance {
|
||||
kcp_proxy_src: None,
|
||||
kcp_proxy_dst: None,
|
||||
|
||||
quic_proxy_src: None,
|
||||
quic_proxy_dst: None,
|
||||
|
||||
peer_center,
|
||||
|
||||
vpn_portal: Arc::new(Mutex::new(Box::new(vpn_portal_inst))),
|
||||
@@ -341,7 +406,7 @@ impl Instance {
|
||||
// Warning, if there is an IP conflict in the network when using DHCP, the IP will be automatically changed.
|
||||
fn check_dhcp_ip_conflict(&self) {
|
||||
use rand::Rng;
|
||||
let peer_manager_c = self.peer_manager.clone();
|
||||
let peer_manager_c = Arc::downgrade(&self.peer_manager.clone());
|
||||
let global_ctx_c = self.get_global_ctx();
|
||||
let nic_ctx = self.nic_ctx.clone();
|
||||
let _peer_packet_receiver = self.peer_packet_receiver.clone();
|
||||
@@ -352,6 +417,11 @@ impl Instance {
|
||||
loop {
|
||||
tokio::time::sleep(std::time::Duration::from_secs(next_sleep_time)).await;
|
||||
|
||||
let Some(peer_manager_c) = peer_manager_c.upgrade() else {
|
||||
tracing::warn!("peer manager is dropped, stop dhcp check.");
|
||||
return;
|
||||
};
|
||||
|
||||
// do not allocate ip if no peer connected
|
||||
let routes = peer_manager_c.list_routes().await;
|
||||
if routes.is_empty() {
|
||||
@@ -499,6 +569,20 @@ impl Instance {
|
||||
self.kcp_proxy_dst = Some(dst_proxy);
|
||||
}
|
||||
|
||||
if self.global_ctx.get_flags().enable_quic_proxy {
|
||||
let quic_src = QUICProxySrc::new(self.get_peer_manager()).await;
|
||||
quic_src.start().await;
|
||||
self.quic_proxy_src = Some(quic_src);
|
||||
}
|
||||
|
||||
if !self.global_ctx.get_flags().disable_quic_input {
|
||||
let quic_dst = QUICProxyDst::new(self.global_ctx.clone())?;
|
||||
quic_dst.start().await?;
|
||||
self.global_ctx
|
||||
.set_quic_proxy_port(Some(quic_dst.local_addr()?.port()));
|
||||
self.quic_proxy_dst = Some(quic_dst);
|
||||
}
|
||||
|
||||
// run after tun device created, so listener can bind to tun device, which may be required by win 10
|
||||
self.ip_proxy = Some(IpProxy::new(
|
||||
self.get_global_ctx(),
|
||||
@@ -674,6 +758,24 @@ impl Instance {
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(quic_proxy) = self.quic_proxy_src.as_ref() {
|
||||
s.registry().register(
|
||||
TcpProxyRpcServer::new(TcpProxyRpcService::new(quic_proxy.get_tcp_proxy())),
|
||||
"quic_src",
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(quic_proxy) = self.quic_proxy_dst.as_ref() {
|
||||
s.registry().register(
|
||||
TcpProxyRpcServer::new(QUICProxyDstRpcService::new(quic_proxy)),
|
||||
"quic_dst",
|
||||
);
|
||||
}
|
||||
|
||||
s.set_hook(Arc::new(InstanceRpcServerHook::new(
|
||||
self.global_ctx.config.get_rpc_portal_whitelist(),
|
||||
)));
|
||||
|
||||
let _g = self.global_ctx.net_ns.guard();
|
||||
Ok(s.serve().await.with_context(|| "rpc server start failed")?)
|
||||
}
|
||||
@@ -725,4 +827,173 @@ impl Instance {
|
||||
Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx, magic_dns_runner).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn clear_resources(&mut self) {
|
||||
self.peer_manager.clear_resources().await;
|
||||
let _ = self.nic_ctx.lock().await.take();
|
||||
if let Some(rpc_server) = self.rpc_server.take() {
|
||||
rpc_server.registry().unregister_all();
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Instance {
|
||||
fn drop(&mut self) {
|
||||
let my_peer_id = self.peer_manager.my_peer_id();
|
||||
let pm = Arc::downgrade(&self.peer_manager);
|
||||
let nic_ctx = self.nic_ctx.clone();
|
||||
if let Some(rpc_server) = self.rpc_server.take() {
|
||||
rpc_server.registry().unregister_all();
|
||||
};
|
||||
tokio::spawn(async move {
|
||||
nic_ctx.lock().await.take();
|
||||
if let Some(pm) = pm.upgrade() {
|
||||
pm.clear_resources().await;
|
||||
};
|
||||
|
||||
let now = std::time::Instant::now();
|
||||
while now.elapsed().as_secs() < 1 {
|
||||
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
|
||||
if pm.strong_count() == 0 {
|
||||
tracing::info!(
|
||||
"Instance for peer {} dropped, all resources cleared.",
|
||||
my_peer_id
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
debug_assert!(
|
||||
false,
|
||||
"Instance for peer {} dropped, but resources not cleared in 1 seconds.",
|
||||
my_peer_id
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{
|
||||
instance::instance::InstanceRpcServerHook, proto::rpc_impl::standalone::RpcServerHook,
|
||||
};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_rpc_portal_whitelist() {
|
||||
use cidr::IpCidr;
|
||||
|
||||
struct TestCase {
|
||||
remote_url: String,
|
||||
whitelist: Option<Vec<IpCidr>>,
|
||||
expected_result: bool,
|
||||
}
|
||||
|
||||
let test_cases: Vec<TestCase> = vec![
|
||||
// Test default whitelist (127.0.0.0/8, ::1/128)
|
||||
TestCase {
|
||||
remote_url: "tcp://127.0.0.1:15888".to_string(),
|
||||
whitelist: None,
|
||||
expected_result: true,
|
||||
},
|
||||
TestCase {
|
||||
remote_url: "tcp://127.1.2.3:15888".to_string(),
|
||||
whitelist: None,
|
||||
expected_result: true,
|
||||
},
|
||||
TestCase {
|
||||
remote_url: "tcp://192.168.1.1:15888".to_string(),
|
||||
whitelist: None,
|
||||
expected_result: false,
|
||||
},
|
||||
// Test custom whitelist
|
||||
TestCase {
|
||||
remote_url: "tcp://192.168.1.10:15888".to_string(),
|
||||
whitelist: Some(vec![
|
||||
"192.168.1.0/24".parse().unwrap(),
|
||||
"10.0.0.0/8".parse().unwrap(),
|
||||
]),
|
||||
expected_result: true,
|
||||
},
|
||||
TestCase {
|
||||
remote_url: "tcp://10.1.2.3:15888".to_string(),
|
||||
whitelist: Some(vec![
|
||||
"192.168.1.0/24".parse().unwrap(),
|
||||
"10.0.0.0/8".parse().unwrap(),
|
||||
]),
|
||||
expected_result: true,
|
||||
},
|
||||
TestCase {
|
||||
remote_url: "tcp://172.16.0.1:15888".to_string(),
|
||||
whitelist: Some(vec![
|
||||
"192.168.1.0/24".parse().unwrap(),
|
||||
"10.0.0.0/8".parse().unwrap(),
|
||||
]),
|
||||
expected_result: false,
|
||||
},
|
||||
// Test empty whitelist (should reject all connections)
|
||||
TestCase {
|
||||
remote_url: "tcp://127.0.0.1:15888".to_string(),
|
||||
whitelist: Some(vec![]),
|
||||
expected_result: false,
|
||||
},
|
||||
// Test broad whitelist (0.0.0.0/0 and ::/0 accept all IP addresses)
|
||||
TestCase {
|
||||
remote_url: "tcp://8.8.8.8:15888".to_string(),
|
||||
whitelist: Some(vec!["0.0.0.0/0".parse().unwrap()]),
|
||||
expected_result: true,
|
||||
},
|
||||
// Test edge case: specific IP whitelist
|
||||
TestCase {
|
||||
remote_url: "tcp://192.168.1.5:15888".to_string(),
|
||||
whitelist: Some(vec!["192.168.1.5/32".parse().unwrap()]),
|
||||
expected_result: true,
|
||||
},
|
||||
TestCase {
|
||||
remote_url: "tcp://192.168.1.6:15888".to_string(),
|
||||
whitelist: Some(vec!["192.168.1.5/32".parse().unwrap()]),
|
||||
expected_result: false,
|
||||
},
|
||||
// Test invalid URL (this case will fail during URL parsing)
|
||||
TestCase {
|
||||
remote_url: "invalid-url".to_string(),
|
||||
whitelist: None,
|
||||
expected_result: false,
|
||||
},
|
||||
// Test URL without IP address (this case will fail during IP parsing)
|
||||
TestCase {
|
||||
remote_url: "tcp://localhost:15888".to_string(),
|
||||
whitelist: None,
|
||||
expected_result: false,
|
||||
},
|
||||
];
|
||||
|
||||
for case in test_cases {
|
||||
let hook = InstanceRpcServerHook::new(case.whitelist.clone());
|
||||
let tunnel_info = Some(crate::proto::common::TunnelInfo {
|
||||
remote_addr: Some(crate::proto::common::Url {
|
||||
url: case.remote_url.clone(),
|
||||
}),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let result = hook.on_new_client(tunnel_info).await;
|
||||
if case.expected_result {
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Expected success for remote_url:{},whitelist:{:?},but got: {:?}",
|
||||
case.remote_url,
|
||||
case.whitelist,
|
||||
result
|
||||
);
|
||||
} else {
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"Expected failure for remote_url:{},whitelist:{:?},but got: {:?}",
|
||||
case.remote_url,
|
||||
case.whitelist,
|
||||
result
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
use std::{fmt::Debug, net::IpAddr, str::FromStr, sync::Arc};
|
||||
use std::{
|
||||
fmt::Debug,
|
||||
net::IpAddr,
|
||||
str::FromStr,
|
||||
sync::{Arc, Weak},
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_trait::async_trait;
|
||||
@@ -89,7 +94,7 @@ pub struct ListenerManager<H> {
|
||||
global_ctx: ArcGlobalCtx,
|
||||
net_ns: NetNS,
|
||||
listeners: Vec<ListenerFactory>,
|
||||
peer_manager: Arc<H>,
|
||||
peer_manager: Weak<H>,
|
||||
|
||||
tasks: JoinSet<()>,
|
||||
}
|
||||
@@ -100,7 +105,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
|
||||
global_ctx: global_ctx.clone(),
|
||||
net_ns: global_ctx.net_ns.clone(),
|
||||
listeners: Vec::new(),
|
||||
peer_manager,
|
||||
peer_manager: Arc::downgrade(&peer_manager),
|
||||
tasks: JoinSet::new(),
|
||||
}
|
||||
}
|
||||
@@ -137,6 +142,8 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
|
||||
if self.global_ctx.config.get_flags().enable_ipv6
|
||||
&& !is_url_host_ipv6(&l)
|
||||
&& is_url_host_unspecified(&l)
|
||||
// quic enables dual-stack by default, may conflict with v4 listener
|
||||
&& l.scheme() != "quic"
|
||||
{
|
||||
let mut ipv6_listener = l.clone();
|
||||
ipv6_listener
|
||||
@@ -169,7 +176,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
|
||||
#[tracing::instrument(skip(creator))]
|
||||
async fn run_listener(
|
||||
creator: Arc<ListenerCreator>,
|
||||
peer_manager: Arc<H>,
|
||||
peer_manager: Weak<H>,
|
||||
global_ctx: ArcGlobalCtx,
|
||||
) {
|
||||
loop {
|
||||
@@ -221,6 +228,10 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
|
||||
let peer_manager = peer_manager.clone();
|
||||
let global_ctx = global_ctx.clone();
|
||||
tokio::spawn(async move {
|
||||
let Some(peer_manager) = peer_manager.upgrade() else {
|
||||
tracing::error!("peer manager is gone, cannot handle tunnel");
|
||||
return;
|
||||
};
|
||||
let server_ret = peer_manager.handle_tunnel(ret).await;
|
||||
if let Err(e) = &server_ret {
|
||||
global_ctx.issue_event(GlobalCtxEvent::ConnectionError(
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
io,
|
||||
net::Ipv4Addr,
|
||||
pin::Pin,
|
||||
@@ -569,26 +570,26 @@ impl NicCtx {
|
||||
let ifname = nic.ifname().to_owned();
|
||||
|
||||
self.tasks.spawn(async move {
|
||||
let mut cur_proxy_cidrs = vec![];
|
||||
let mut cur_proxy_cidrs = BTreeSet::new();
|
||||
loop {
|
||||
let mut proxy_cidrs = vec![];
|
||||
let mut proxy_cidrs = BTreeSet::new();
|
||||
let routes = peer_mgr.list_routes().await;
|
||||
for r in routes {
|
||||
for cidr in r.proxy_cidrs {
|
||||
let Ok(cidr) = cidr.parse::<cidr::Ipv4Cidr>() else {
|
||||
continue;
|
||||
};
|
||||
proxy_cidrs.push(cidr);
|
||||
proxy_cidrs.insert(cidr);
|
||||
}
|
||||
}
|
||||
// add vpn portal cidr to proxy_cidrs
|
||||
if let Some(vpn_cfg) = global_ctx.config.get_vpn_portal_config() {
|
||||
proxy_cidrs.push(vpn_cfg.client_cidr);
|
||||
proxy_cidrs.insert(vpn_cfg.client_cidr);
|
||||
}
|
||||
|
||||
if let Some(routes) = global_ctx.config.get_routes() {
|
||||
// if has manual routes, just override entire proxy_cidrs
|
||||
proxy_cidrs = routes;
|
||||
proxy_cidrs = routes.into_iter().collect();
|
||||
}
|
||||
|
||||
// if route is in cur_proxy_cidrs but not in proxy_cidrs, delete it.
|
||||
@@ -657,6 +658,15 @@ impl NicCtx {
|
||||
let _ = RegistryManager::reg_change_catrgory_in_profile(&dev_name);
|
||||
}
|
||||
|
||||
#[cfg(any(target_os = "macos", target_os = "freebsd"))]
|
||||
{
|
||||
// remove the 10.0.0.0/24 route (which is added by rust-tun by default)
|
||||
let _ = nic
|
||||
.ifcfg
|
||||
.remove_ipv4_route(&nic.ifname(), "10.0.0.0".parse().unwrap(), 24)
|
||||
.await;
|
||||
}
|
||||
|
||||
self.global_ctx
|
||||
.issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string()));
|
||||
ret
|
||||
|
||||
564
easytier/src/instance_manager.rs
Normal file
564
easytier/src/instance_manager.rs
Normal file
@@ -0,0 +1,564 @@
|
||||
use std::{collections::BTreeMap, sync::Arc};
|
||||
|
||||
use dashmap::DashMap;
|
||||
|
||||
use crate::{
|
||||
common::{
|
||||
config::{ConfigLoader, TomlConfigLoader},
|
||||
global_ctx::{EventBusSubscriber, GlobalCtxEvent},
|
||||
scoped_task::ScopedTask,
|
||||
},
|
||||
launcher::{ConfigSource, NetworkInstance, NetworkInstanceRunningInfo},
|
||||
proto,
|
||||
};
|
||||
|
||||
pub struct NetworkInstanceManager {
|
||||
instance_map: Arc<DashMap<uuid::Uuid, NetworkInstance>>,
|
||||
instance_stop_tasks: Arc<DashMap<uuid::Uuid, ScopedTask<()>>>,
|
||||
stop_check_notifier: Arc<tokio::sync::Notify>,
|
||||
}
|
||||
|
||||
impl NetworkInstanceManager {
|
||||
pub fn new() -> Self {
|
||||
NetworkInstanceManager {
|
||||
instance_map: Arc::new(DashMap::new()),
|
||||
instance_stop_tasks: Arc::new(DashMap::new()),
|
||||
stop_check_notifier: Arc::new(tokio::sync::Notify::new()),
|
||||
}
|
||||
}
|
||||
|
||||
fn start_instance_task(&self, instance_id: uuid::Uuid) -> Result<(), anyhow::Error> {
|
||||
let instance = self
|
||||
.instance_map
|
||||
.get(&instance_id)
|
||||
.ok_or_else(|| anyhow::anyhow!("instance {} not found", instance_id))?;
|
||||
|
||||
match instance.get_config_source() {
|
||||
ConfigSource::FFI | ConfigSource::GUI => {
|
||||
// FFI and GUI have no tokio runtime, so we don't need to spawn a task
|
||||
return Ok(());
|
||||
}
|
||||
_ => {
|
||||
if tokio::runtime::Handle::try_current().is_err() {
|
||||
return Err(anyhow::anyhow!(
|
||||
"tokio runtime not found, cannot start instance task"
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let instance_stop_notifier = instance.get_stop_notifier();
|
||||
let instance_config_source = instance.get_config_source();
|
||||
let instance_event_receiver = match instance.get_config_source() {
|
||||
ConfigSource::Cli | ConfigSource::File | ConfigSource::Web => {
|
||||
Some(instance.subscribe_event())
|
||||
}
|
||||
ConfigSource::GUI | ConfigSource::FFI => None,
|
||||
};
|
||||
|
||||
let instance_map = self.instance_map.clone();
|
||||
let instance_stop_tasks = self.instance_stop_tasks.clone();
|
||||
|
||||
let stop_check_notifier = self.stop_check_notifier.clone();
|
||||
self.instance_stop_tasks.insert(
|
||||
instance_id,
|
||||
ScopedTask::from(tokio::spawn(async move {
|
||||
let Some(instance_stop_notifier) = instance_stop_notifier else {
|
||||
return;
|
||||
};
|
||||
let _t = if let Some(event) = instance_event_receiver.flatten() {
|
||||
Some(ScopedTask::from(handle_event(instance_id, event)))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
instance_stop_notifier.notified().await;
|
||||
if let Some(instance) = instance_map.get(&instance_id) {
|
||||
if let Some(e) = instance.get_latest_error_msg() {
|
||||
tracing::error!(?e, ?instance_id, "instance stopped with error");
|
||||
eprintln!("instance {} stopped with error: {}", instance_id, e);
|
||||
}
|
||||
}
|
||||
match instance_config_source {
|
||||
ConfigSource::Cli | ConfigSource::File => {
|
||||
instance_map.remove(&instance_id);
|
||||
}
|
||||
ConfigSource::Web | ConfigSource::GUI | ConfigSource::FFI => {}
|
||||
}
|
||||
instance_stop_tasks.remove(&instance_id);
|
||||
stop_check_notifier.notify_waiters();
|
||||
})),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn run_network_instance(
|
||||
&self,
|
||||
cfg: TomlConfigLoader,
|
||||
source: ConfigSource,
|
||||
) -> Result<uuid::Uuid, anyhow::Error> {
|
||||
let instance_id = cfg.get_id();
|
||||
if self.instance_map.contains_key(&instance_id) {
|
||||
anyhow::bail!("instance {} already exists", instance_id);
|
||||
}
|
||||
|
||||
let mut instance = NetworkInstance::new(cfg, source);
|
||||
instance.start()?;
|
||||
|
||||
self.instance_map.insert(instance_id, instance);
|
||||
self.start_instance_task(instance_id)?;
|
||||
Ok(instance_id)
|
||||
}
|
||||
|
||||
pub fn retain_network_instance(
|
||||
&self,
|
||||
instance_ids: Vec<uuid::Uuid>,
|
||||
) -> Result<Vec<uuid::Uuid>, anyhow::Error> {
|
||||
self.instance_map.retain(|k, _| instance_ids.contains(k));
|
||||
Ok(self.list_network_instance_ids())
|
||||
}
|
||||
|
||||
pub fn delete_network_instance(
|
||||
&self,
|
||||
instance_ids: Vec<uuid::Uuid>,
|
||||
) -> Result<Vec<uuid::Uuid>, anyhow::Error> {
|
||||
self.instance_map.retain(|k, _| !instance_ids.contains(k));
|
||||
Ok(self.list_network_instance_ids())
|
||||
}
|
||||
|
||||
pub fn collect_network_infos(
|
||||
&self,
|
||||
) -> Result<BTreeMap<uuid::Uuid, NetworkInstanceRunningInfo>, anyhow::Error> {
|
||||
let mut ret = BTreeMap::new();
|
||||
for instance in self.instance_map.iter() {
|
||||
if let Some(info) = instance.get_running_info() {
|
||||
ret.insert(instance.key().clone(), info);
|
||||
}
|
||||
}
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
pub fn list_network_instance_ids(&self) -> Vec<uuid::Uuid> {
|
||||
self.instance_map
|
||||
.iter()
|
||||
.map(|item| item.key().clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn get_network_instance_name(&self, instance_id: &uuid::Uuid) -> Option<String> {
|
||||
self.instance_map
|
||||
.get(instance_id)
|
||||
.map(|instance| instance.value().get_inst_name())
|
||||
}
|
||||
|
||||
pub fn set_tun_fd(&self, instance_id: &uuid::Uuid, fd: i32) -> Result<(), anyhow::Error> {
|
||||
let mut instance = self
|
||||
.instance_map
|
||||
.get_mut(instance_id)
|
||||
.ok_or_else(|| anyhow::anyhow!("instance not found"))?;
|
||||
instance.set_tun_fd(fd);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn wait(&self) {
|
||||
while self.instance_map.len() > 0 {
|
||||
self.stop_check_notifier.notified().await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
fn handle_event(
|
||||
instance_id: uuid::Uuid,
|
||||
mut events: EventBusSubscriber,
|
||||
) -> tokio::task::JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if let Ok(e) = events.recv().await {
|
||||
match e {
|
||||
GlobalCtxEvent::PeerAdded(p) => {
|
||||
print_event(instance_id, format!("new peer added. peer_id: {}", p));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PeerRemoved(p) => {
|
||||
print_event(instance_id, format!("peer removed. peer_id: {}", p));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PeerConnAdded(p) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"new peer connection added. conn_info: {}",
|
||||
peer_conn_info_to_string(p)
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PeerConnRemoved(p) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"peer connection removed. conn_info: {}",
|
||||
peer_conn_info_to_string(p)
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ListenerAddFailed(p, msg) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!("listener add failed. listener: {}, msg: {}", p, msg),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ListenerAcceptFailed(p, msg) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!("listener accept failed. listener: {}, msg: {}", p, msg),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ListenerAdded(p) => {
|
||||
if p.scheme() == "ring" {
|
||||
continue;
|
||||
}
|
||||
print_event(instance_id, format!("new listener added. listener: {}", p));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ConnectionAccepted(local, remote) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"new connection accepted. local: {}, remote: {}",
|
||||
local, remote
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ConnectionError(local, remote, err) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"connection error. local: {}, remote: {}, err: {}",
|
||||
local, remote, err
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::TunDeviceReady(dev) => {
|
||||
print_event(instance_id, format!("tun device ready. dev: {}", dev));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::TunDeviceError(err) => {
|
||||
print_event(instance_id, format!("tun device error. err: {}", err));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::Connecting(dst) => {
|
||||
print_event(instance_id, format!("connecting to peer. dst: {}", dst));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::ConnectError(dst, ip_version, err) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"connect to peer error. dst: {}, ip_version: {}, err: {}",
|
||||
dst, ip_version, err
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::VpnPortalClientConnected(portal, client_addr) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"vpn portal client connected. portal: {}, client_addr: {}",
|
||||
portal, client_addr
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::VpnPortalClientDisconnected(portal, client_addr) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"vpn portal client disconnected. portal: {}, client_addr: {}",
|
||||
portal, client_addr
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::DhcpIpv4Changed(old, new) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!("dhcp ip changed. old: {:?}, new: {:?}", old, new),
|
||||
);
|
||||
}
|
||||
|
||||
GlobalCtxEvent::DhcpIpv4Conflicted(ip) => {
|
||||
print_event(instance_id, format!("dhcp ip conflict. ip: {:?}", ip));
|
||||
}
|
||||
|
||||
GlobalCtxEvent::PortForwardAdded(cfg) => {
|
||||
print_event(
|
||||
instance_id,
|
||||
format!(
|
||||
"port forward added. local: {}, remote: {}, proto: {}",
|
||||
cfg.bind_addr.unwrap().to_string(),
|
||||
cfg.dst_addr.unwrap().to_string(),
|
||||
cfg.socket_type().as_str_name()
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
events = events.resubscribe();
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn print_event(instance_id: uuid::Uuid, msg: String) {
|
||||
println!(
|
||||
"{}: [{}] {}",
|
||||
chrono::Local::now().format("%Y-%m-%d %H:%M:%S"),
|
||||
instance_id,
|
||||
msg
|
||||
);
|
||||
}
|
||||
|
||||
fn peer_conn_info_to_string(p: proto::cli::PeerConnInfo) -> String {
|
||||
format!(
|
||||
"my_peer_id: {}, dst_peer_id: {}, tunnel_info: {:?}",
|
||||
p.my_peer_id, p.peer_id, p.tunnel
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::common::config::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn it_works() {
|
||||
let manager = NetworkInstanceManager::new();
|
||||
let cfg_str = r#"
|
||||
listeners = []
|
||||
"#;
|
||||
|
||||
let port = crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
|
||||
|
||||
let instance_id1 = manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str)
|
||||
.map(|c| {
|
||||
c.set_listeners(vec![format!("tcp://0.0.0.0:{}", port).parse().unwrap()]);
|
||||
c
|
||||
})
|
||||
.unwrap(),
|
||||
ConfigSource::Cli,
|
||||
)
|
||||
.unwrap();
|
||||
let instance_id2 = manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::File,
|
||||
)
|
||||
.unwrap();
|
||||
let instance_id3 = manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::GUI,
|
||||
)
|
||||
.unwrap();
|
||||
let instance_id4 = manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::Web,
|
||||
)
|
||||
.unwrap();
|
||||
let instance_id5 = manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::FFI,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await; // to make instance actually started
|
||||
|
||||
assert!(!crate::utils::check_tcp_available(port));
|
||||
|
||||
assert!(manager.instance_map.contains_key(&instance_id1));
|
||||
assert!(manager.instance_map.contains_key(&instance_id2));
|
||||
assert!(manager.instance_map.contains_key(&instance_id3));
|
||||
assert!(manager.instance_map.contains_key(&instance_id4));
|
||||
assert!(manager.instance_map.contains_key(&instance_id5));
|
||||
assert_eq!(manager.list_network_instance_ids().len(), 5);
|
||||
assert_eq!(manager.instance_stop_tasks.len(), 3); // FFI and GUI instance does not have a stop task
|
||||
|
||||
manager
|
||||
.delete_network_instance(vec![instance_id3, instance_id4, instance_id5])
|
||||
.unwrap();
|
||||
assert!(!manager.instance_map.contains_key(&instance_id3));
|
||||
assert!(!manager.instance_map.contains_key(&instance_id4));
|
||||
assert!(!manager.instance_map.contains_key(&instance_id5));
|
||||
assert_eq!(manager.list_network_instance_ids().len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_tokio_runtime() {
|
||||
let manager = NetworkInstanceManager::new();
|
||||
let cfg_str = r#"
|
||||
listeners = []
|
||||
"#;
|
||||
|
||||
let port = crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
|
||||
|
||||
assert!(manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::Cli,
|
||||
)
|
||||
.is_err());
|
||||
assert!(manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::File,
|
||||
)
|
||||
.is_err());
|
||||
assert!(manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str)
|
||||
.map(|c| {
|
||||
c.set_listeners(vec![format!("tcp://0.0.0.0:{}", port).parse().unwrap()]);
|
||||
c
|
||||
})
|
||||
.unwrap(),
|
||||
ConfigSource::GUI,
|
||||
)
|
||||
.is_ok());
|
||||
assert!(manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::Web,
|
||||
)
|
||||
.is_err());
|
||||
assert!(manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str).unwrap(),
|
||||
ConfigSource::FFI,
|
||||
)
|
||||
.is_ok());
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_secs(1)); // wait instance actually started
|
||||
|
||||
assert!(!crate::utils::check_tcp_available(port));
|
||||
|
||||
assert_eq!(manager.list_network_instance_ids().len(), 5);
|
||||
assert_eq!(
|
||||
manager
|
||||
.instance_map
|
||||
.iter()
|
||||
.map(|item| item.is_easytier_running())
|
||||
.filter(|x| *x)
|
||||
.count(),
|
||||
5
|
||||
); // stop tasks failed not affect instance running status
|
||||
assert_eq!(manager.instance_stop_tasks.len(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_single_instance_failed() {
|
||||
let free_tcp_port =
|
||||
crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
|
||||
|
||||
for config_source in [ConfigSource::Cli, ConfigSource::File] {
|
||||
let _port_holder =
|
||||
std::net::TcpListener::bind(format!("0.0.0.0:{}", free_tcp_port)).unwrap();
|
||||
|
||||
let cfg_str = format!(
|
||||
r#"
|
||||
listeners = ["tcp://0.0.0.0:{}"]
|
||||
"#,
|
||||
free_tcp_port
|
||||
);
|
||||
|
||||
let manager = NetworkInstanceManager::new();
|
||||
manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
|
||||
config_source.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
tokio::select! {
|
||||
_ = manager.wait() => {
|
||||
assert_eq!(manager.list_network_instance_ids().len(), 0);
|
||||
}
|
||||
_ = tokio::time::sleep(std::time::Duration::from_secs(5)) => {
|
||||
panic!("instance manager with single failed instance({:?}) should not running", config_source);
|
||||
}
|
||||
}
|
||||
}
|
||||
for config_source in [ConfigSource::Web, ConfigSource::GUI, ConfigSource::FFI] {
|
||||
let _port_holder =
|
||||
std::net::TcpListener::bind(format!("0.0.0.0:{}", free_tcp_port)).unwrap();
|
||||
|
||||
let cfg_str = format!(
|
||||
r#"
|
||||
listeners = ["tcp://0.0.0.0:{}"]
|
||||
"#,
|
||||
free_tcp_port
|
||||
);
|
||||
|
||||
let manager = NetworkInstanceManager::new();
|
||||
manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
|
||||
config_source.clone(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(manager.list_network_instance_ids().len(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multiple_instances_one_failed() {
|
||||
let free_tcp_port =
|
||||
crate::utils::find_free_tcp_port(10012..65534).expect("no free tcp port found");
|
||||
|
||||
let manager = NetworkInstanceManager::new();
|
||||
let cfg_str = format!(
|
||||
r#"
|
||||
listeners = ["tcp://0.0.0.0:{}"]
|
||||
[flags]
|
||||
enable_ipv6 = false
|
||||
"#,
|
||||
free_tcp_port
|
||||
);
|
||||
|
||||
manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
|
||||
ConfigSource::Cli,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
|
||||
manager
|
||||
.run_network_instance(
|
||||
TomlConfigLoader::new_from_str(cfg_str.as_str()).unwrap(),
|
||||
ConfigSource::Cli,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
tokio::select! {
|
||||
_ = manager.wait() => {
|
||||
panic!("instance manager with multiple instances one failed should still running");
|
||||
}
|
||||
_ = tokio::time::sleep(std::time::Duration::from_secs(2)) => {
|
||||
assert_eq!(manager.list_network_instance_ids().len(), 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
net::SocketAddr,
|
||||
sync::{atomic::AtomicBool, Arc, RwLock},
|
||||
};
|
||||
|
||||
@@ -136,8 +135,6 @@ impl EasyTierLauncher {
|
||||
fetch_node_info: bool,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let mut instance = Instance::new(cfg);
|
||||
let peer_mgr = instance.get_peer_manager();
|
||||
|
||||
let mut tasks = JoinSet::new();
|
||||
|
||||
// Subscribe to global context events
|
||||
@@ -165,7 +162,7 @@ impl EasyTierLauncher {
|
||||
if fetch_node_info {
|
||||
let data_c = data.clone();
|
||||
let global_ctx_c = instance.get_global_ctx();
|
||||
let peer_mgr_c = peer_mgr.clone();
|
||||
let peer_mgr_c = instance.get_peer_manager().clone();
|
||||
let vpn_portal = instance.get_vpn_portal_inst();
|
||||
tasks.spawn(async move {
|
||||
loop {
|
||||
@@ -211,12 +208,10 @@ impl EasyTierLauncher {
|
||||
tasks.abort_all();
|
||||
drop(tasks);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
instance.clear_resources().await;
|
||||
drop(instance);
|
||||
|
||||
fn check_tcp_available(port: u16) -> bool {
|
||||
let s = format!("0.0.0.0:{}", port).parse::<SocketAddr>().unwrap();
|
||||
std::net::TcpListener::bind(s).is_ok()
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn select_proper_rpc_port(cfg: &TomlConfigLoader) {
|
||||
@@ -225,13 +220,12 @@ impl EasyTierLauncher {
|
||||
};
|
||||
|
||||
if f.port() == 0 {
|
||||
for i in 15888..15900 {
|
||||
if Self::check_tcp_available(i) {
|
||||
f.set_port(i);
|
||||
cfg.set_rpc_portal(f);
|
||||
break;
|
||||
}
|
||||
}
|
||||
let Some(port) = crate::utils::find_free_tcp_port(15888..15900) else {
|
||||
tracing::warn!("No free port found for RPC portal, skipping setting RPC portal");
|
||||
return;
|
||||
};
|
||||
f.set_port(port);
|
||||
cfg.set_rpc_portal(f);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -343,25 +337,40 @@ impl Drop for EasyTierLauncher {
|
||||
|
||||
pub type NetworkInstanceRunningInfo = crate::proto::web::NetworkInstanceRunningInfo;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ConfigSource {
|
||||
Cli,
|
||||
File,
|
||||
Web,
|
||||
GUI,
|
||||
FFI,
|
||||
}
|
||||
|
||||
pub struct NetworkInstance {
|
||||
config: TomlConfigLoader,
|
||||
launcher: Option<EasyTierLauncher>,
|
||||
|
||||
fetch_node_info: bool,
|
||||
config_source: ConfigSource,
|
||||
}
|
||||
|
||||
impl NetworkInstance {
|
||||
pub fn new(config: TomlConfigLoader) -> Self {
|
||||
pub fn new(config: TomlConfigLoader, source: ConfigSource) -> Self {
|
||||
Self {
|
||||
config,
|
||||
launcher: None,
|
||||
fetch_node_info: true,
|
||||
config_source: source,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_fetch_node_info(mut self, fetch_node_info: bool) -> Self {
|
||||
self.fetch_node_info = fetch_node_info;
|
||||
self
|
||||
fn get_fetch_node_info(&self) -> bool {
|
||||
match self.config_source {
|
||||
ConfigSource::Cli | ConfigSource::File => false,
|
||||
ConfigSource::Web | ConfigSource::GUI | ConfigSource::FFI => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_config_source(&self) -> ConfigSource {
|
||||
self.config_source.clone()
|
||||
}
|
||||
|
||||
pub fn is_easytier_running(&self) -> bool {
|
||||
@@ -395,6 +404,10 @@ impl NetworkInstance {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_inst_name(&self) -> String {
|
||||
self.config.get_inst_name()
|
||||
}
|
||||
|
||||
pub fn set_tun_fd(&mut self, tun_fd: i32) {
|
||||
if let Some(launcher) = self.launcher.as_ref() {
|
||||
launcher.data.tun_fd.write().unwrap().replace(tun_fd);
|
||||
@@ -406,7 +419,7 @@ impl NetworkInstance {
|
||||
return Ok(self.subscribe_event().unwrap());
|
||||
}
|
||||
|
||||
let launcher = EasyTierLauncher::new(self.fetch_node_info);
|
||||
let launcher = EasyTierLauncher::new(self.get_fetch_node_info());
|
||||
self.launcher = Some(launcher);
|
||||
let ev = self.subscribe_event().unwrap();
|
||||
|
||||
@@ -418,7 +431,7 @@ impl NetworkInstance {
|
||||
Ok(ev)
|
||||
}
|
||||
|
||||
fn subscribe_event(&self) -> Option<broadcast::Receiver<GlobalCtxEvent>> {
|
||||
pub fn subscribe_event(&self) -> Option<broadcast::Receiver<GlobalCtxEvent>> {
|
||||
if let Some(launcher) = self.launcher.as_ref() {
|
||||
Some(launcher.data.event_subscriber.read().unwrap().subscribe())
|
||||
} else {
|
||||
@@ -426,9 +439,16 @@ impl NetworkInstance {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn wait(&self) -> Option<String> {
|
||||
pub fn get_stop_notifier(&self) -> Option<Arc<tokio::sync::Notify>> {
|
||||
if let Some(launcher) = self.launcher.as_ref() {
|
||||
Some(launcher.data.instance_stop_notifier.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_latest_error_msg(&self) -> Option<String> {
|
||||
if let Some(launcher) = self.launcher.as_ref() {
|
||||
launcher.data.instance_stop_notifier.notified().await;
|
||||
launcher.error_msg.read().unwrap().clone()
|
||||
} else {
|
||||
None
|
||||
@@ -436,6 +456,36 @@ impl NetworkInstance {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_proxy_network_to_config(
|
||||
proxy_network: &str,
|
||||
cfg: &TomlConfigLoader,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let parts: Vec<&str> = proxy_network.split("->").collect();
|
||||
let real_cidr = parts[0]
|
||||
.parse()
|
||||
.with_context(|| format!("failed to parse proxy network: {}", parts[0]))?;
|
||||
|
||||
if parts.len() > 2 {
|
||||
return Err(anyhow::anyhow!(
|
||||
"invalid proxy network format: {}, support format: <real_cidr> or <real_cidr>-><mapped_cidr>, example:
|
||||
10.0.0.0/24 or 10.0.0.0/24->192.168.0.0/24",
|
||||
proxy_network
|
||||
));
|
||||
}
|
||||
|
||||
let mapped_cidr = if parts.len() == 2 {
|
||||
Some(
|
||||
parts[1]
|
||||
.parse()
|
||||
.with_context(|| format!("failed to parse mapped network: {}", parts[1]))?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
cfg.add_proxy_cidr(real_cidr, mapped_cidr);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub type NetworkingMethod = crate::proto::web::NetworkingMethod;
|
||||
pub type NetworkConfig = crate::proto::web::NetworkConfig;
|
||||
|
||||
@@ -515,10 +565,7 @@ impl NetworkConfig {
|
||||
cfg.set_listeners(listener_urls);
|
||||
|
||||
for n in self.proxy_cidrs.iter() {
|
||||
cfg.add_proxy_cidr(
|
||||
n.parse()
|
||||
.with_context(|| format!("failed to parse proxy network: {}", n))?,
|
||||
);
|
||||
add_proxy_network_to_config(n, &cfg)?;
|
||||
}
|
||||
|
||||
cfg.set_rpc_portal(
|
||||
@@ -527,6 +574,20 @@ impl NetworkConfig {
|
||||
.with_context(|| format!("failed to parse rpc portal port: {:?}", self.rpc_port))?,
|
||||
);
|
||||
|
||||
if self.rpc_portal_whitelists.is_empty() {
|
||||
cfg.set_rpc_portal_whitelist(None);
|
||||
} else {
|
||||
cfg.set_rpc_portal_whitelist(Some(
|
||||
self.rpc_portal_whitelists
|
||||
.iter()
|
||||
.map(|s| {
|
||||
s.parse()
|
||||
.with_context(|| format!("failed to parse rpc portal whitelist: {}", s))
|
||||
})
|
||||
.collect::<Result<Vec<_>, _>>()?,
|
||||
));
|
||||
}
|
||||
|
||||
if self.enable_vpn_portal.unwrap_or_default() {
|
||||
let cidr = format!(
|
||||
"{}/{}",
|
||||
@@ -624,6 +685,14 @@ impl NetworkConfig {
|
||||
flags.disable_kcp_input = disable_kcp_input;
|
||||
}
|
||||
|
||||
if let Some(enable_quic_proxy) = self.enable_quic_proxy {
|
||||
flags.enable_quic_proxy = enable_quic_proxy;
|
||||
}
|
||||
|
||||
if let Some(disable_quic_input) = self.disable_quic_input {
|
||||
flags.disable_quic_input = disable_quic_input;
|
||||
}
|
||||
|
||||
if let Some(disable_p2p) = self.disable_p2p {
|
||||
flags.disable_p2p = disable_p2p;
|
||||
}
|
||||
@@ -676,7 +745,403 @@ impl NetworkConfig {
|
||||
flags.mtu = mtu as u32;
|
||||
}
|
||||
|
||||
if let Some(enable_private_mode) = self.enable_private_mode {
|
||||
flags.private_mode = enable_private_mode;
|
||||
}
|
||||
|
||||
cfg.set_flags(flags);
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
pub fn new_from_config(config: &TomlConfigLoader) -> Result<Self, anyhow::Error> {
|
||||
let default_config = TomlConfigLoader::default();
|
||||
|
||||
let mut result = Self::default();
|
||||
|
||||
result.instance_id = Some(config.get_id().to_string());
|
||||
if config.get_hostname() != default_config.get_hostname() {
|
||||
result.hostname = Some(config.get_hostname());
|
||||
}
|
||||
|
||||
result.dhcp = Some(config.get_dhcp());
|
||||
|
||||
let network_identity = config.get_network_identity();
|
||||
result.network_name = Some(network_identity.network_name.clone());
|
||||
result.network_secret = network_identity.network_secret.clone();
|
||||
|
||||
if let Some(ipv4) = config.get_ipv4() {
|
||||
result.virtual_ipv4 = Some(ipv4.address().to_string());
|
||||
result.network_length = Some(ipv4.network_length() as i32);
|
||||
}
|
||||
|
||||
let peers = config.get_peers();
|
||||
match peers.len() {
|
||||
1 => {
|
||||
result.networking_method = Some(NetworkingMethod::PublicServer as i32);
|
||||
result.public_server_url = Some(peers[0].uri.to_string());
|
||||
}
|
||||
0 => {
|
||||
result.networking_method = Some(NetworkingMethod::Standalone as i32);
|
||||
}
|
||||
_ => {
|
||||
result.networking_method = Some(NetworkingMethod::Manual as i32);
|
||||
result.peer_urls = peers.iter().map(|p| p.uri.to_string()).collect();
|
||||
}
|
||||
}
|
||||
|
||||
result.listener_urls = config
|
||||
.get_listeners()
|
||||
.unwrap_or_else(|| vec![])
|
||||
.iter()
|
||||
.map(|l| l.to_string())
|
||||
.collect();
|
||||
|
||||
result.proxy_cidrs = config
|
||||
.get_proxy_cidrs()
|
||||
.iter()
|
||||
.map(|c| {
|
||||
if let Some(mapped) = c.mapped_cidr {
|
||||
format!("{}->{}", c.cidr, mapped)
|
||||
} else {
|
||||
c.cidr.to_string()
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
if let Some(rpc_portal) = config.get_rpc_portal() {
|
||||
result.rpc_port = Some(rpc_portal.port() as i32);
|
||||
}
|
||||
|
||||
if let Some(whitelist) = config.get_rpc_portal_whitelist() {
|
||||
result.rpc_portal_whitelists = whitelist.iter().map(|w| w.to_string()).collect();
|
||||
}
|
||||
|
||||
if let Some(vpn_config) = config.get_vpn_portal_config() {
|
||||
result.enable_vpn_portal = Some(true);
|
||||
|
||||
let cidr = vpn_config.client_cidr;
|
||||
result.vpn_portal_client_network_addr = Some(cidr.first_address().to_string());
|
||||
result.vpn_portal_client_network_len = Some(cidr.network_length() as i32);
|
||||
|
||||
result.vpn_portal_listen_port = Some(vpn_config.wireguard_listen.port() as i32);
|
||||
}
|
||||
|
||||
if let Some(routes) = config.get_routes() {
|
||||
if !routes.is_empty() {
|
||||
result.enable_manual_routes = Some(true);
|
||||
result.routes = routes.iter().map(|r| r.to_string()).collect();
|
||||
}
|
||||
}
|
||||
|
||||
let exit_nodes = config.get_exit_nodes();
|
||||
if !exit_nodes.is_empty() {
|
||||
result.exit_nodes = exit_nodes.iter().map(|n| n.to_string()).collect();
|
||||
}
|
||||
|
||||
if let Some(socks5_portal) = config.get_socks5_portal() {
|
||||
result.enable_socks5 = Some(true);
|
||||
result.socks5_port = socks5_portal.port().map(|p| p as i32);
|
||||
}
|
||||
|
||||
let mapped_listeners = config.get_mapped_listeners();
|
||||
if !mapped_listeners.is_empty() {
|
||||
result.mapped_listeners = mapped_listeners.iter().map(|l| l.to_string()).collect();
|
||||
}
|
||||
|
||||
let flags = config.get_flags();
|
||||
result.latency_first = Some(flags.latency_first);
|
||||
result.dev_name = Some(flags.dev_name.clone());
|
||||
result.use_smoltcp = Some(flags.use_smoltcp);
|
||||
result.enable_kcp_proxy = Some(flags.enable_kcp_proxy);
|
||||
result.disable_kcp_input = Some(flags.disable_kcp_input);
|
||||
result.enable_quic_proxy = Some(flags.enable_quic_proxy);
|
||||
result.disable_quic_input = Some(flags.disable_quic_input);
|
||||
result.disable_p2p = Some(flags.disable_p2p);
|
||||
result.bind_device = Some(flags.bind_device);
|
||||
result.no_tun = Some(flags.no_tun);
|
||||
result.enable_exit_node = Some(flags.enable_exit_node);
|
||||
result.relay_all_peer_rpc = Some(flags.relay_all_peer_rpc);
|
||||
result.multi_thread = Some(flags.multi_thread);
|
||||
result.proxy_forward_by_system = Some(flags.proxy_forward_by_system);
|
||||
result.disable_encryption = Some(!flags.enable_encryption);
|
||||
result.disable_udp_hole_punching = Some(flags.disable_udp_hole_punching);
|
||||
result.enable_magic_dns = Some(flags.accept_dns);
|
||||
result.mtu = Some(flags.mtu as i32);
|
||||
result.enable_private_mode = Some(flags.private_mode);
|
||||
|
||||
if !flags.relay_network_whitelist.is_empty() && flags.relay_network_whitelist != "*" {
|
||||
result.enable_relay_network_whitelist = Some(true);
|
||||
result.relay_network_whitelist = flags
|
||||
.relay_network_whitelist
|
||||
.split_whitespace()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::common::config::ConfigLoader;
|
||||
use rand::Rng;
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
fn gen_default_config() -> crate::common::config::TomlConfigLoader {
|
||||
let config = crate::common::config::TomlConfigLoader::default();
|
||||
config.set_id(uuid::Uuid::new_v4());
|
||||
config.set_dhcp(false);
|
||||
config.set_inst_name("default".to_string());
|
||||
config.set_listeners(vec![]);
|
||||
config.set_rpc_portal(std::net::SocketAddr::from(([0, 0, 0, 0], 0)));
|
||||
config
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_network_config_conversion_basic() -> Result<(), anyhow::Error> {
|
||||
let config = gen_default_config();
|
||||
|
||||
let network_config = super::NetworkConfig::new_from_config(&config)?;
|
||||
|
||||
let generated_config = network_config.gen_config()?;
|
||||
|
||||
let config_str = config.dump();
|
||||
let generated_config_str = generated_config.dump();
|
||||
|
||||
assert_eq!(
|
||||
config_str, generated_config_str,
|
||||
"Generated config does not match original config:\nOriginal:\n{}\n\nGenerated:\n{}\nNetwork Config: {}\n",
|
||||
config_str, generated_config_str, serde_json::to_string(&network_config).unwrap()
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_network_config_conversion_random() -> Result<(), anyhow::Error> {
|
||||
let mut rng = rand::thread_rng();
|
||||
|
||||
for _ in 0..100 {
|
||||
let config = gen_default_config();
|
||||
|
||||
config.set_id(uuid::Uuid::new_v4());
|
||||
|
||||
config.set_dhcp(rng.gen_bool(0.5));
|
||||
|
||||
if rng.gen_bool(0.7) {
|
||||
let hostname = format!("host-{}", rng.gen::<u16>());
|
||||
config.set_hostname(Some(hostname));
|
||||
}
|
||||
|
||||
config.set_network_identity(crate::common::config::NetworkIdentity::new(
|
||||
format!("network-{}", rng.gen::<u16>()),
|
||||
format!("secret-{}", rng.gen::<u64>()),
|
||||
));
|
||||
config.set_inst_name(config.get_network_identity().network_name.clone());
|
||||
|
||||
if !config.get_dhcp() {
|
||||
let addr = Ipv4Addr::new(
|
||||
rng.gen_range(1..254),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(1..254),
|
||||
);
|
||||
let prefix_len = rng.gen_range(1..31);
|
||||
let ipv4 = format!("{}/{}", addr, prefix_len).parse().unwrap();
|
||||
config.set_ipv4(Some(ipv4));
|
||||
}
|
||||
|
||||
let peer_count = rng.gen_range(0..3);
|
||||
let mut peers = Vec::new();
|
||||
for _ in 0..peer_count {
|
||||
let port = rng.gen_range(10000..60000);
|
||||
let protocol = if rng.gen_bool(0.5) { "tcp" } else { "udp" };
|
||||
let uri = format!("{}://127.0.0.1:{}", protocol, port)
|
||||
.parse()
|
||||
.unwrap();
|
||||
peers.push(crate::common::config::PeerConfig { uri });
|
||||
}
|
||||
config.set_peers(peers);
|
||||
|
||||
if rng.gen_bool(0.7) {
|
||||
let listener_count = rng.gen_range(0..3);
|
||||
let mut listeners = Vec::new();
|
||||
for _ in 0..listener_count {
|
||||
let port = rng.gen_range(10000..60000);
|
||||
let protocol = if rng.gen_bool(0.5) { "tcp" } else { "udp" };
|
||||
listeners.push(format!("{}://0.0.0.0:{}", protocol, port).parse().unwrap());
|
||||
}
|
||||
config.set_listeners(listeners);
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.6) {
|
||||
let proxy_count = rng.gen_range(0..3);
|
||||
for _ in 0..proxy_count {
|
||||
let network = format!(
|
||||
"{}.{}.{}.0/{}",
|
||||
rng.gen_range(1..254),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(24..30)
|
||||
)
|
||||
.parse::<cidr::Ipv4Cidr>()
|
||||
.unwrap();
|
||||
|
||||
let mapped_network = if rng.gen_bool(0.5) {
|
||||
Some(
|
||||
format!(
|
||||
"{}.{}.{}.0/{}",
|
||||
rng.gen_range(1..254),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
network.network_length()
|
||||
)
|
||||
.parse::<cidr::Ipv4Cidr>()
|
||||
.unwrap(),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
config.add_proxy_cidr(network, mapped_network);
|
||||
}
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.8) {
|
||||
let port = rng.gen_range(0..65535);
|
||||
config.set_rpc_portal(std::net::SocketAddr::from(([0, 0, 0, 0], port)));
|
||||
|
||||
if rng.gen_bool(0.6) {
|
||||
let whitelist_count = rng.gen_range(1..3);
|
||||
let mut whitelist = Vec::new();
|
||||
for _ in 0..whitelist_count {
|
||||
let ip = Ipv4Addr::new(
|
||||
rng.gen_range(1..254),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
);
|
||||
let cidr = format!("{}/32", ip);
|
||||
whitelist.push(cidr.parse().unwrap());
|
||||
}
|
||||
config.set_rpc_portal_whitelist(Some(whitelist));
|
||||
}
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.5) {
|
||||
let vpn_network = format!(
|
||||
"{}.{}.{}.0/{}",
|
||||
rng.gen_range(10..173),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(24..30)
|
||||
);
|
||||
let vpn_port = rng.gen_range(10000..60000);
|
||||
config.set_vpn_portal_config(crate::common::config::VpnPortalConfig {
|
||||
client_cidr: vpn_network.parse().unwrap(),
|
||||
wireguard_listen: format!("0.0.0.0:{}", vpn_port).parse().unwrap(),
|
||||
});
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.6) {
|
||||
let route_count = rng.gen_range(1..3);
|
||||
let mut routes = Vec::new();
|
||||
for _ in 0..route_count {
|
||||
let route = format!(
|
||||
"{}.{}.{}.0/{}",
|
||||
rng.gen_range(1..254),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(24..30)
|
||||
);
|
||||
routes.push(route.parse().unwrap());
|
||||
}
|
||||
config.set_routes(Some(routes));
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.4) {
|
||||
let node_count = rng.gen_range(1..3);
|
||||
let mut nodes = Vec::new();
|
||||
for _ in 0..node_count {
|
||||
let ip = Ipv4Addr::new(
|
||||
rng.gen_range(1..254),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(0..255),
|
||||
rng.gen_range(1..254),
|
||||
);
|
||||
nodes.push(ip);
|
||||
}
|
||||
config.set_exit_nodes(nodes);
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.5) {
|
||||
let socks5_port = rng.gen_range(10000..60000);
|
||||
config.set_socks5_portal(Some(
|
||||
format!("socks5://0.0.0.0:{}", socks5_port).parse().unwrap(),
|
||||
));
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.4) {
|
||||
let count = rng.gen_range(1..3);
|
||||
let mut mapped_listeners = Vec::new();
|
||||
for _ in 0..count {
|
||||
let port = rng.gen_range(10000..60000);
|
||||
mapped_listeners.push(format!("tcp://0.0.0.0:{}", port).parse().unwrap());
|
||||
}
|
||||
config.set_mapped_listeners(Some(mapped_listeners));
|
||||
}
|
||||
|
||||
if rng.gen_bool(0.9) {
|
||||
let mut flags = crate::common::config::gen_default_flags();
|
||||
flags.latency_first = rng.gen_bool(0.5);
|
||||
flags.dev_name = format!("etun{}", rng.gen_range(0..10));
|
||||
flags.use_smoltcp = rng.gen_bool(0.3);
|
||||
flags.enable_kcp_proxy = rng.gen_bool(0.5);
|
||||
flags.disable_kcp_input = rng.gen_bool(0.3);
|
||||
flags.enable_quic_proxy = rng.gen_bool(0.5);
|
||||
flags.disable_quic_input = rng.gen_bool(0.3);
|
||||
flags.disable_p2p = rng.gen_bool(0.2);
|
||||
flags.bind_device = rng.gen_bool(0.3);
|
||||
flags.no_tun = rng.gen_bool(0.1);
|
||||
flags.enable_exit_node = rng.gen_bool(0.4);
|
||||
flags.relay_all_peer_rpc = rng.gen_bool(0.5);
|
||||
flags.multi_thread = rng.gen_bool(0.7);
|
||||
flags.proxy_forward_by_system = rng.gen_bool(0.3);
|
||||
flags.enable_encryption = rng.gen_bool(0.8);
|
||||
flags.disable_udp_hole_punching = rng.gen_bool(0.2);
|
||||
flags.accept_dns = rng.gen_bool(0.6);
|
||||
flags.mtu = rng.gen_range(1200..1500);
|
||||
flags.private_mode = rng.gen_bool(0.3);
|
||||
|
||||
if rng.gen_bool(0.4) {
|
||||
flags.relay_network_whitelist = (0..rng.gen_range(1..3))
|
||||
.map(|_| {
|
||||
format!(
|
||||
"{}.{}.0.0/16",
|
||||
rng.gen_range(10..192),
|
||||
rng.gen_range(0..255)
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.join(" ");
|
||||
}
|
||||
|
||||
config.set_flags(flags);
|
||||
}
|
||||
|
||||
let network_config = super::NetworkConfig::new_from_config(&config)?;
|
||||
let generated_config = network_config.gen_config()?;
|
||||
generated_config.set_peers(generated_config.get_peers()); // Ensure peers field is not None
|
||||
|
||||
let config_str = config.dump();
|
||||
let generated_config_str = generated_config.dump();
|
||||
|
||||
assert_eq!(
|
||||
config_str, generated_config_str,
|
||||
"Generated config does not match original config:\nOriginal:\n{}\n\nGenerated:\n{}\nNetwork Config: {}\n",
|
||||
config_str, generated_config_str, serde_json::to_string(&network_config).unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ mod vpn_portal;
|
||||
pub mod common;
|
||||
pub mod connector;
|
||||
pub mod launcher;
|
||||
pub mod instance_manager;
|
||||
pub mod peers;
|
||||
pub mod proto;
|
||||
pub mod tunnel;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
sync::Arc,
|
||||
sync::{Arc, Weak},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
@@ -31,7 +31,8 @@ use crate::{
|
||||
use super::{server::PeerCenterServer, Digest, Error};
|
||||
|
||||
struct PeerCenterBase {
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
peer_mgr: Weak<PeerManager>,
|
||||
my_peer_id: PeerId,
|
||||
tasks: Mutex<JoinSet<()>>,
|
||||
lock: Arc<Mutex<()>>,
|
||||
}
|
||||
@@ -40,20 +41,25 @@ struct PeerCenterBase {
|
||||
static SERVICE_ID: u32 = 50;
|
||||
|
||||
struct PeridicJobCtx<T> {
|
||||
peer_mgr: Arc<PeerManager>,
|
||||
peer_mgr: Weak<PeerManager>,
|
||||
my_peer_id: PeerId,
|
||||
center_peer: AtomicCell<PeerId>,
|
||||
job_ctx: T,
|
||||
}
|
||||
|
||||
impl PeerCenterBase {
|
||||
pub async fn init(&self) -> Result<(), Error> {
|
||||
self.peer_mgr
|
||||
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
|
||||
return Err(Error::Shutdown);
|
||||
};
|
||||
|
||||
peer_mgr
|
||||
.get_peer_rpc_mgr()
|
||||
.rpc_server()
|
||||
.registry()
|
||||
.register(
|
||||
PeerCenterRpcServer::new(PeerCenterServer::new(self.peer_mgr.my_peer_id())),
|
||||
&self.peer_mgr.get_global_ctx().get_network_name(),
|
||||
PeerCenterRpcServer::new(PeerCenterServer::new(peer_mgr.my_peer_id())),
|
||||
&peer_mgr.get_global_ctx().get_network_name(),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
@@ -91,17 +97,23 @@ impl PeerCenterBase {
|
||||
+ Sync
|
||||
+ 'static),
|
||||
) -> () {
|
||||
let my_peer_id = self.peer_mgr.my_peer_id();
|
||||
let my_peer_id = self.my_peer_id;
|
||||
let peer_mgr = self.peer_mgr.clone();
|
||||
let lock = self.lock.clone();
|
||||
self.tasks.lock().await.spawn(
|
||||
async move {
|
||||
let ctx = Arc::new(PeridicJobCtx {
|
||||
peer_mgr: peer_mgr.clone(),
|
||||
my_peer_id,
|
||||
center_peer: AtomicCell::new(PeerId::default()),
|
||||
job_ctx,
|
||||
});
|
||||
loop {
|
||||
let Some(peer_mgr) = peer_mgr.upgrade() else {
|
||||
tracing::error!("peer manager is shutdown, exit periodic job");
|
||||
return;
|
||||
};
|
||||
|
||||
let Some(center_peer) = Self::select_center_peer(&peer_mgr).await else {
|
||||
tracing::trace!("no center peer found, sleep 1 second");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
@@ -138,7 +150,8 @@ impl PeerCenterBase {
|
||||
|
||||
pub fn new(peer_mgr: Arc<PeerManager>) -> Self {
|
||||
PeerCenterBase {
|
||||
peer_mgr,
|
||||
peer_mgr: Arc::downgrade(&peer_mgr),
|
||||
my_peer_id: peer_mgr.my_peer_id(),
|
||||
tasks: Mutex::new(JoinSet::new()),
|
||||
lock: Arc::new(Mutex::new(())),
|
||||
}
|
||||
@@ -289,7 +302,7 @@ impl PeerCenterInstance {
|
||||
|
||||
self.client
|
||||
.init_periodic_job(ctx, |client, ctx| async move {
|
||||
let my_node_id = ctx.peer_mgr.my_peer_id();
|
||||
let my_node_id = ctx.my_peer_id;
|
||||
let peers: PeerInfoForGlobalMap = ctx.job_ctx.service.list_peers().await.into();
|
||||
let peer_list = peers.direct_peers.keys().map(|k| *k).collect();
|
||||
let job_ctx = &ctx.job_ctx;
|
||||
|
||||
@@ -19,6 +19,8 @@ pub enum Error {
|
||||
DigestMismatch,
|
||||
#[error("Not center server")]
|
||||
NotCenterServer,
|
||||
#[error("Instance shutdown")]
|
||||
Shutdown,
|
||||
}
|
||||
|
||||
pub type Digest = u64;
|
||||
|
||||
@@ -26,12 +26,13 @@ use crate::{
|
||||
global_ctx::{ArcGlobalCtx, GlobalCtx, GlobalCtxEvent, NetworkIdentity},
|
||||
join_joinset_background,
|
||||
stun::MockStunInfoCollector,
|
||||
token_bucket::TokenBucket,
|
||||
PeerId,
|
||||
},
|
||||
peers::route_trait::{Route, RouteInterface},
|
||||
proto::{
|
||||
cli::{ForeignNetworkEntryPb, ListForeignNetworkResponse, PeerInfo},
|
||||
common::NatType,
|
||||
common::{LimiterConfig, NatType},
|
||||
peer_rpc::DirectConnectorRpcServer,
|
||||
},
|
||||
tunnel::packet_def::{PacketType, ZCPacket},
|
||||
@@ -69,14 +70,19 @@ struct ForeignNetworkEntry {
|
||||
|
||||
packet_recv: Mutex<Option<PacketRecvChanReceiver>>,
|
||||
|
||||
bps_limiter: Arc<TokenBucket>,
|
||||
|
||||
tasks: Mutex<JoinSet<()>>,
|
||||
|
||||
pub lock: Mutex<()>,
|
||||
}
|
||||
|
||||
impl ForeignNetworkEntry {
|
||||
fn new(
|
||||
network: NetworkIdentity,
|
||||
global_ctx: ArcGlobalCtx,
|
||||
// NOTICE: ospf route need my_peer_id be changed after restart.
|
||||
my_peer_id: PeerId,
|
||||
global_ctx: ArcGlobalCtx,
|
||||
relay_data: bool,
|
||||
pm_packet_sender: PacketRecvChan,
|
||||
) -> Self {
|
||||
@@ -99,6 +105,16 @@ impl ForeignNetworkEntry {
|
||||
&network.network_name,
|
||||
);
|
||||
|
||||
let relay_bps_limit = global_ctx.config.get_flags().foreign_relay_bps_limit;
|
||||
let limiter_config = LimiterConfig {
|
||||
burst_rate: None,
|
||||
bps: Some(relay_bps_limit),
|
||||
fill_duration_ms: None,
|
||||
};
|
||||
let bps_limiter = global_ctx
|
||||
.token_bucket_manager()
|
||||
.get_or_create(&network.network_name, limiter_config.into());
|
||||
|
||||
Self {
|
||||
my_peer_id,
|
||||
|
||||
@@ -113,7 +129,11 @@ impl ForeignNetworkEntry {
|
||||
|
||||
packet_recv: Mutex::new(Some(packet_recv)),
|
||||
|
||||
bps_limiter,
|
||||
|
||||
tasks: Mutex::new(JoinSet::new()),
|
||||
|
||||
lock: Mutex::new(()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -202,11 +222,7 @@ impl ForeignNetworkEntry {
|
||||
(peer_rpc, rpc_transport_sender)
|
||||
}
|
||||
|
||||
async fn prepare_route(
|
||||
&self,
|
||||
my_peer_id: PeerId,
|
||||
accessor: Box<dyn GlobalForeignNetworkAccessor>,
|
||||
) {
|
||||
async fn prepare_route(&self, accessor: Box<dyn GlobalForeignNetworkAccessor>) {
|
||||
struct Interface {
|
||||
my_peer_id: PeerId,
|
||||
peer_map: Weak<PeerMap>,
|
||||
@@ -238,10 +254,14 @@ impl ForeignNetworkEntry {
|
||||
}
|
||||
}
|
||||
|
||||
let route = PeerRoute::new(my_peer_id, self.global_ctx.clone(), self.peer_rpc.clone());
|
||||
let route = PeerRoute::new(
|
||||
self.my_peer_id,
|
||||
self.global_ctx.clone(),
|
||||
self.peer_rpc.clone(),
|
||||
);
|
||||
route
|
||||
.open(Box::new(Interface {
|
||||
my_peer_id,
|
||||
my_peer_id: self.my_peer_id,
|
||||
network_identity: self.network.clone(),
|
||||
peer_map: Arc::downgrade(&self.peer_map),
|
||||
accessor,
|
||||
@@ -260,6 +280,7 @@ impl ForeignNetworkEntry {
|
||||
let relay_data = self.relay_data;
|
||||
let pm_sender = self.pm_packet_sender.lock().await.take().unwrap();
|
||||
let network_name = self.network.network_name.clone();
|
||||
let bps_limiter = self.bps_limiter.clone();
|
||||
|
||||
self.tasks.lock().await.spawn(async move {
|
||||
while let Ok(zc_packet) = recv_packet_from_chan(&mut recv).await {
|
||||
@@ -279,8 +300,16 @@ impl ForeignNetworkEntry {
|
||||
}
|
||||
tracing::trace!(?hdr, "ignore packet in foreign network");
|
||||
} else {
|
||||
if !relay_data && hdr.packet_type == PacketType::Data as u8 {
|
||||
continue;
|
||||
if hdr.packet_type == PacketType::Data as u8
|
||||
|| hdr.packet_type == PacketType::KcpSrc as u8
|
||||
|| hdr.packet_type == PacketType::KcpDst as u8
|
||||
{
|
||||
if !relay_data {
|
||||
continue;
|
||||
}
|
||||
if !bps_limiter.try_consume(hdr.len.into()) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
let gateway_peer_id = peer_map
|
||||
@@ -317,8 +346,8 @@ impl ForeignNetworkEntry {
|
||||
});
|
||||
}
|
||||
|
||||
async fn prepare(&self, my_peer_id: PeerId, accessor: Box<dyn GlobalForeignNetworkAccessor>) {
|
||||
self.prepare_route(my_peer_id, accessor).await;
|
||||
async fn prepare(&self, accessor: Box<dyn GlobalForeignNetworkAccessor>) {
|
||||
self.prepare_route(accessor).await;
|
||||
self.start_packet_recv().await;
|
||||
self.peer_rpc.run();
|
||||
}
|
||||
@@ -400,8 +429,8 @@ impl ForeignNetworkManagerData {
|
||||
new_added = true;
|
||||
Arc::new(ForeignNetworkEntry::new(
|
||||
network_identity.clone(),
|
||||
global_ctx.clone(),
|
||||
my_peer_id,
|
||||
global_ctx.clone(),
|
||||
relay_data,
|
||||
pm_packet_sender.clone(),
|
||||
))
|
||||
@@ -417,9 +446,7 @@ impl ForeignNetworkManagerData {
|
||||
drop(l);
|
||||
|
||||
if new_added {
|
||||
entry
|
||||
.prepare(my_peer_id, Box::new(self.accessor.clone()))
|
||||
.await;
|
||||
entry.prepare(Box::new(self.accessor.clone())).await;
|
||||
}
|
||||
|
||||
(entry, new_added)
|
||||
@@ -467,6 +494,13 @@ impl ForeignNetworkManager {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_network_peer_id(&self, network_name: &str) -> Option<PeerId> {
|
||||
self.data
|
||||
.network_peer_maps
|
||||
.get(network_name)
|
||||
.and_then(|v| Some(v.my_peer_id))
|
||||
}
|
||||
|
||||
pub async fn add_peer_conn(&self, peer_conn: PeerConn) -> Result<(), Error> {
|
||||
tracing::info!(peer_conn = ?peer_conn.get_conn_info(), network = ?peer_conn.get_network_identity(), "add new peer conn in foreign network manager");
|
||||
|
||||
@@ -483,7 +517,7 @@ impl ForeignNetworkManager {
|
||||
.data
|
||||
.get_or_insert_entry(
|
||||
&peer_conn.get_network_identity(),
|
||||
self.my_peer_id,
|
||||
peer_conn.get_my_peer_id(),
|
||||
peer_conn.get_peer_id(),
|
||||
!ret.is_err(),
|
||||
&self.global_ctx,
|
||||
@@ -491,17 +525,30 @@ impl ForeignNetworkManager {
|
||||
)
|
||||
.await;
|
||||
|
||||
if entry.network != peer_conn.get_network_identity() {
|
||||
let _g = entry.lock.lock().await;
|
||||
|
||||
if entry.network != peer_conn.get_network_identity()
|
||||
|| entry.my_peer_id != peer_conn.get_my_peer_id()
|
||||
{
|
||||
if new_added {
|
||||
self.data
|
||||
.remove_network(&entry.network.network_name.clone());
|
||||
}
|
||||
return Err(anyhow::anyhow!(
|
||||
"network secret not match. exp: {:?} real: {:?}",
|
||||
entry.network,
|
||||
peer_conn.get_network_identity()
|
||||
)
|
||||
.into());
|
||||
let err = if entry.my_peer_id != peer_conn.get_my_peer_id() {
|
||||
anyhow::anyhow!(
|
||||
"my peer id not match. exp: {:?} real: {:?}, need retry connect",
|
||||
entry.my_peer_id,
|
||||
peer_conn.get_my_peer_id()
|
||||
)
|
||||
} else {
|
||||
anyhow::anyhow!(
|
||||
"network secret not match. exp: {:?} real: {:?}",
|
||||
entry.network,
|
||||
peer_conn.get_network_identity()
|
||||
)
|
||||
};
|
||||
tracing::error!(?err, "foreign network entry not match, disconnect peer");
|
||||
return Err(err.into());
|
||||
}
|
||||
|
||||
if new_added {
|
||||
@@ -567,7 +614,8 @@ impl ForeignNetworkManager {
|
||||
.network_secret_digest
|
||||
.unwrap_or_default()
|
||||
.to_vec(),
|
||||
..Default::default()
|
||||
my_peer_id_for_this_network: item.my_peer_id,
|
||||
peers: Default::default(),
|
||||
};
|
||||
for peer in item.peer_map.list_peers().await {
|
||||
let mut peer_info = PeerInfo::default();
|
||||
@@ -614,8 +662,6 @@ impl Drop for ForeignNetworkManager {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::{
|
||||
common::global_ctx::tests::get_mock_global_ctx_with_network,
|
||||
connector::udp_hole_punch::tests::{
|
||||
@@ -629,6 +675,7 @@ mod tests {
|
||||
set_global_var,
|
||||
tunnel::common::tests::wait_for_condition,
|
||||
};
|
||||
use std::time::Duration;
|
||||
|
||||
use super::*;
|
||||
|
||||
@@ -698,7 +745,7 @@ mod tests {
|
||||
let s_ret =
|
||||
tokio::spawn(async move { b_mgr_copy.add_tunnel_as_server(b_ring, true).await });
|
||||
|
||||
pma_net1.add_client_tunnel(a_ring).await.unwrap();
|
||||
pma_net1.add_client_tunnel(a_ring, false).await.unwrap();
|
||||
|
||||
s_ret.await.unwrap().unwrap();
|
||||
}
|
||||
@@ -769,7 +816,10 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
vec![pm_center.my_peer_id()],
|
||||
vec![pm_center
|
||||
.get_foreign_network_manager()
|
||||
.get_network_peer_id("net1")
|
||||
.unwrap()],
|
||||
pma_net1
|
||||
.get_foreign_network_client()
|
||||
.get_peer_map()
|
||||
@@ -777,7 +827,10 @@ mod tests {
|
||||
.await
|
||||
);
|
||||
assert_eq!(
|
||||
vec![pm_center.my_peer_id()],
|
||||
vec![pm_center
|
||||
.get_foreign_network_manager()
|
||||
.get_network_peer_id("net1")
|
||||
.unwrap()],
|
||||
pmb_net1
|
||||
.get_foreign_network_client()
|
||||
.get_peer_map()
|
||||
@@ -894,6 +947,75 @@ mod tests {
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_foreign_network_manager_cluster_simple() {
|
||||
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
|
||||
|
||||
let pm_center1 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
let pm_center2 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
|
||||
connect_peer_manager(pm_center1.clone(), pm_center2.clone()).await;
|
||||
|
||||
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
|
||||
let pmb_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
|
||||
connect_peer_manager(pma_net1.clone(), pm_center1.clone()).await;
|
||||
connect_peer_manager(pmb_net1.clone(), pm_center2.clone()).await;
|
||||
|
||||
wait_route_appear(pma_net1.clone(), pmb_net1.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let pma_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
|
||||
let pmb_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
|
||||
connect_peer_manager(pma_net2.clone(), pm_center1.clone()).await;
|
||||
connect_peer_manager(pmb_net2.clone(), pm_center2.clone()).await;
|
||||
|
||||
wait_route_appear(pma_net2.clone(), pmb_net2.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_foreign_network_manager_cluster_multiple_hops() {
|
||||
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
|
||||
|
||||
let pm_center1 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
let pm_center2 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
let pm_center3 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
let pm_center4 = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
|
||||
connect_peer_manager(pm_center1.clone(), pm_center2.clone()).await;
|
||||
connect_peer_manager(pm_center2.clone(), pm_center3.clone()).await;
|
||||
connect_peer_manager(pm_center3.clone(), pm_center4.clone()).await;
|
||||
|
||||
let pma_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
|
||||
let pmb_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
|
||||
connect_peer_manager(pma_net1.clone(), pm_center1.clone()).await;
|
||||
connect_peer_manager(pmb_net1.clone(), pm_center3.clone()).await;
|
||||
wait_route_appear(pma_net1.clone(), pmb_net1.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
let pmc_net1 = create_mock_peer_manager_for_foreign_network("net1").await;
|
||||
connect_peer_manager(pmc_net1.clone(), pm_center4.clone()).await;
|
||||
wait_route_appear(pma_net1.clone(), pmc_net1.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let pma_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
|
||||
let pmb_net2 = create_mock_peer_manager_for_foreign_network("net2").await;
|
||||
connect_peer_manager(pma_net2.clone(), pm_center1.clone()).await;
|
||||
connect_peer_manager(pmb_net2.clone(), pm_center4.clone()).await;
|
||||
wait_route_appear(pma_net2.clone(), pmb_net2.clone())
|
||||
.await
|
||||
.unwrap();
|
||||
drop(pmb_net2);
|
||||
wait_for_condition(
|
||||
|| async { pma_net2.list_routes().await.len() == 1 },
|
||||
Duration::from_secs(5),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_foreign_network_manager_cluster() {
|
||||
set_global_var!(OSPF_UPDATE_MY_GLOBAL_FOREIGN_NETWORK_INTERVAL_SEC, 1);
|
||||
|
||||
179
easytier/src/peers/graph_algo.rs
Normal file
179
easytier/src/peers/graph_algo.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
use core::cmp::Ordering;
|
||||
use hashbrown::hash_map::{
|
||||
Entry::{Occupied, Vacant},
|
||||
HashMap,
|
||||
};
|
||||
use petgraph::{
|
||||
algo::Measure,
|
||||
visit::{EdgeRef as _, IntoEdges, VisitMap as _, Visitable},
|
||||
};
|
||||
use std::{collections::BinaryHeap, hash::Hash};
|
||||
|
||||
/// `MinScored<K, T>` pairs a score `K` with a payload `T` for use in a
/// `BinaryHeap`.
///
/// The comparison is REVERSED on the score, so Rust's max-heap behaves as a
/// min-heap: popping yields the entry with the least score.
///
/// `Ord` is implemented as a total order even for merely partially ordered
/// score types (e.g. floats): NaN scores sort last in the min-score order.
#[derive(Copy, Clone, Debug)]
pub struct MinScored<K, T>(pub K, pub T);

impl<K: PartialOrd, T> PartialEq for MinScored<K, T> {
    #[inline]
    fn eq(&self, other: &MinScored<K, T>) -> bool {
        matches!(self.cmp(other), Ordering::Equal)
    }
}

impl<K: PartialOrd, T> Eq for MinScored<K, T> {}

impl<K: PartialOrd, T> PartialOrd for MinScored<K, T> {
    #[inline]
    fn partial_cmp(&self, other: &MinScored<K, T>) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl<K: PartialOrd, T> Ord for MinScored<K, T> {
    /// Reversed comparison on the score, made total by ordering NaN last.
    #[inline]
    fn cmp(&self, other: &MinScored<K, T>) -> Ordering {
        match self.0.partial_cmp(&other.0) {
            // Reverse the natural order: smaller scores rank greater.
            Some(Ordering::Less) => Ordering::Greater,
            Some(Ordering::Greater) => Ordering::Less,
            Some(Ordering::Equal) => Ordering::Equal,
            // Incomparable: at least one side is NaN (x != x only for NaN).
            None => {
                let self_is_nan = self.0 != self.0;
                let other_is_nan = other.0 != other.0;
                if self_is_nan && other_is_nan {
                    Ordering::Equal
                } else if self_is_nan {
                    // Order NaN less, so it is last in the MinScore order.
                    Ordering::Less
                } else {
                    Ordering::Greater
                }
            }
        }
    }
}
|
||||
|
||||
pub fn dijkstra_with_first_hop<G, F, K>(
|
||||
graph: G,
|
||||
start: G::NodeId,
|
||||
mut edge_cost: F,
|
||||
) -> (
|
||||
HashMap<G::NodeId, K>,
|
||||
HashMap<G::NodeId, (G::NodeId, usize)>,
|
||||
)
|
||||
where
|
||||
G: IntoEdges + Visitable,
|
||||
G::NodeId: Eq + Hash + Clone,
|
||||
F: FnMut(G::EdgeRef) -> K,
|
||||
K: Measure + Copy,
|
||||
{
|
||||
let mut visited = graph.visit_map();
|
||||
let mut scores = HashMap::new();
|
||||
let mut first_hop = HashMap::new();
|
||||
let mut visit_next = BinaryHeap::new();
|
||||
let zero_score = K::default();
|
||||
scores.insert(start.clone(), zero_score);
|
||||
visit_next.push(MinScored(zero_score, start.clone()));
|
||||
first_hop.insert(start.clone(), (start.clone(), 0));
|
||||
|
||||
while let Some(MinScored(node_score, node)) = visit_next.pop() {
|
||||
if visited.is_visited(&node) {
|
||||
continue;
|
||||
}
|
||||
for edge in graph.edges(node.clone()) {
|
||||
let next = edge.target();
|
||||
if visited.is_visited(&next) {
|
||||
continue;
|
||||
}
|
||||
let next_score = node_score + edge_cost(edge);
|
||||
match scores.entry(next.clone()) {
|
||||
Occupied(mut ent) => {
|
||||
if next_score < *ent.get() {
|
||||
*ent.get_mut() = next_score;
|
||||
visit_next.push(MinScored(next_score, next.clone()));
|
||||
// 继承前驱的 first_hop,或自己就是第一跳
|
||||
let hop = if node == start {
|
||||
(next.clone(), 0)
|
||||
} else {
|
||||
first_hop[&node].clone()
|
||||
};
|
||||
first_hop.insert(next.clone(), (hop.0, hop.1 + 1));
|
||||
}
|
||||
}
|
||||
Vacant(ent) => {
|
||||
ent.insert(next_score);
|
||||
visit_next.push(MinScored(next_score, next.clone()));
|
||||
let hop = if node == start {
|
||||
(next.clone(), 0)
|
||||
} else {
|
||||
first_hop[&node].clone()
|
||||
};
|
||||
first_hop.insert(next.clone(), (hop.0, hop.1 + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
visited.visit(node);
|
||||
}
|
||||
|
||||
(scores, first_hop)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use petgraph::graph::DiGraph;

    #[test]
    fn test_dijkstra_with_first_hop_4node() {
        // Simple chain: a -> b -> c -> d.
        let mut g = DiGraph::<&str, u32>::new();
        let a = g.add_node("a");
        let b = g.add_node("b");
        let c = g.add_node("c");
        let d = g.add_node("d");
        g.extend_with_edges(&[(a, b, 1), (b, c, 1), (c, d, 2)]);

        let (scores, first_hop) = dijkstra_with_first_hop(&g, a, |edge| *edge.weight());

        // Costs accumulate along the chain.
        assert_eq!(scores[&b], 1);
        assert_eq!(scores[&c], 2);
        assert_eq!(scores[&d], 4);

        // Every node is reached through b, with increasing hop counts.
        assert_eq!(first_hop[&b], (b, 1));
        assert_eq!(first_hop[&c], (b, 2));
        assert_eq!(first_hop[&d], (b, 3));
    }

    #[test]
    fn test_dijkstra_with_first_hop() {
        // Diamond with a tail: the cheap route to d goes through b, not c.
        let mut g = DiGraph::<&str, u32>::new();
        let a = g.add_node("a");
        let b = g.add_node("b");
        let c = g.add_node("c");
        let d = g.add_node("d");
        let e = g.add_node("e");
        g.extend_with_edges(&[(a, b, 1), (a, c, 2), (b, d, 1), (c, d, 3), (d, e, 1)]);

        let (scores, first_hop) = dijkstra_with_first_hop(&g, a, |edge| *edge.weight());

        assert_eq!(scores[&b], 1);
        assert_eq!(scores[&c], 2);
        assert_eq!(scores[&d], 2);
        assert_eq!(scores[&e], 3);

        assert_eq!(first_hop[&b], (b, 1));
        assert_eq!(first_hop[&c], (c, 1));
        assert_eq!(first_hop[&d], (b, 2)); // d is reached via b
        assert_eq!(first_hop[&e], (b, 3)); // e is reached via d
    }
}
|
||||
@@ -1,3 +1,5 @@
|
||||
mod graph_algo;
|
||||
|
||||
pub mod peer;
|
||||
// pub mod peer_conn;
|
||||
pub mod peer_conn;
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use dashmap::DashMap;
|
||||
use dashmap::{DashMap, DashSet};
|
||||
|
||||
use tokio::{select, sync::mpsc, task::JoinHandle};
|
||||
use tokio::{select, sync::mpsc};
|
||||
|
||||
use tracing::Instrument;
|
||||
|
||||
@@ -32,7 +32,7 @@ pub struct Peer {
|
||||
packet_recv_chan: PacketRecvChan,
|
||||
|
||||
close_event_sender: mpsc::Sender<PeerConnId>,
|
||||
close_event_listener: JoinHandle<()>,
|
||||
close_event_listener: ScopedTask<()>,
|
||||
|
||||
shutdown_notifier: Arc<tokio::sync::Notify>,
|
||||
|
||||
@@ -87,7 +87,8 @@ impl Peer {
|
||||
"peer_close_event_listener",
|
||||
?peer_node_id,
|
||||
)),
|
||||
);
|
||||
)
|
||||
.into();
|
||||
|
||||
let default_conn_id = Arc::new(AtomicCell::new(PeerConnId::default()));
|
||||
|
||||
@@ -118,8 +119,14 @@ impl Peer {
|
||||
}
|
||||
|
||||
pub async fn add_peer_conn(&self, mut conn: PeerConn) {
|
||||
let close_event_sender = self.close_event_sender.clone();
|
||||
let close_notifier = conn.get_close_notifier();
|
||||
let conn_info = conn.get_conn_info();
|
||||
|
||||
conn.start_recv_loop(self.packet_recv_chan.clone()).await;
|
||||
conn.start_pingpong();
|
||||
self.conns.insert(conn.get_conn_id(), Arc::new(conn));
|
||||
|
||||
let close_event_sender = self.close_event_sender.clone();
|
||||
tokio::spawn(async move {
|
||||
let conn_id = close_notifier.get_conn_id();
|
||||
if let Some(mut waiter) = close_notifier.get_waiter().await {
|
||||
@@ -130,12 +137,8 @@ impl Peer {
|
||||
}
|
||||
});
|
||||
|
||||
conn.start_recv_loop(self.packet_recv_chan.clone()).await;
|
||||
conn.start_pingpong();
|
||||
|
||||
self.global_ctx
|
||||
.issue_event(GlobalCtxEvent::PeerConnAdded(conn.get_conn_info()));
|
||||
self.conns.insert(conn.get_conn_id(), Arc::new(conn));
|
||||
.issue_event(GlobalCtxEvent::PeerConnAdded(conn_info));
|
||||
}
|
||||
|
||||
async fn select_conn(&self) -> Option<ArcPeerConn> {
|
||||
@@ -186,11 +189,28 @@ impl Peer {
|
||||
|
||||
let mut ret = Vec::new();
|
||||
for conn in conns {
|
||||
ret.push(conn.get_conn_info());
|
||||
let info = conn.get_conn_info();
|
||||
if !info.is_closed {
|
||||
ret.push(info);
|
||||
} else {
|
||||
let conn_id = info.conn_id.parse().unwrap();
|
||||
let _ = self.close_peer_conn(&conn_id).await;
|
||||
}
|
||||
}
|
||||
ret
|
||||
}
|
||||
|
||||
pub fn has_directly_connected_conn(&self) -> bool {
|
||||
self.conns.iter().any(|entry|!(entry.value()).is_hole_punched())
|
||||
}
|
||||
|
||||
/// Conn ids of every direct (non-hole-punched) connection to this peer.
pub fn get_directly_connections(&self) -> DashSet<uuid::Uuid> {
    self.conns
        .iter()
        .filter_map(|entry| {
            let conn = entry.value();
            if conn.is_hole_punched() {
                None
            } else {
                Some(conn.get_conn_id())
            }
        })
        .collect()
}
|
||||
|
||||
pub fn get_default_conn_id(&self) -> PeerConnId {
|
||||
self.default_conn_id.load()
|
||||
}
|
||||
|
||||
@@ -101,6 +101,9 @@ pub struct PeerConn {
|
||||
info: Option<HandshakeRequest>,
|
||||
is_client: Option<bool>,
|
||||
|
||||
// remote or local
|
||||
is_hole_punched: bool,
|
||||
|
||||
close_event_notifier: Arc<PeerConnCloseNotify>,
|
||||
|
||||
ctrl_resp_sender: broadcast::Sender<ZCPacket>,
|
||||
@@ -152,6 +155,8 @@ impl PeerConn {
|
||||
info: None,
|
||||
is_client: None,
|
||||
|
||||
is_hole_punched: true,
|
||||
|
||||
close_event_notifier: Arc::new(PeerConnCloseNotify::new(conn_id)),
|
||||
|
||||
ctrl_resp_sender: ctrl_sender,
|
||||
@@ -166,6 +171,14 @@ impl PeerConn {
|
||||
self.conn_id
|
||||
}
|
||||
|
||||
pub fn set_is_hole_punched(&mut self, is_hole_punched: bool) {
|
||||
self.is_hole_punched = is_hole_punched;
|
||||
}
|
||||
|
||||
pub fn is_hole_punched(&self) -> bool {
|
||||
self.is_hole_punched
|
||||
}
|
||||
|
||||
async fn wait_handshake(&mut self, need_retry: &mut bool) -> Result<HandshakeRequest, Error> {
|
||||
*need_retry = false;
|
||||
|
||||
@@ -266,6 +279,31 @@ impl PeerConn {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(handshake_recved))]
|
||||
pub async fn do_handshake_as_server_ext<Fn>(
|
||||
&mut self,
|
||||
mut handshake_recved: Fn,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
Fn: FnMut(&mut Self, &HandshakeRequest) -> Result<(), Error> + Send,
|
||||
{
|
||||
let rsp = self.wait_handshake_loop().await?;
|
||||
|
||||
handshake_recved(self, &rsp)?;
|
||||
|
||||
tracing::info!("handshake request: {:?}", rsp);
|
||||
self.info = Some(rsp);
|
||||
self.is_client = Some(false);
|
||||
|
||||
self.send_handshake().await?;
|
||||
|
||||
if self.get_peer_id() == self.my_peer_id {
|
||||
Err(Error::WaitRespError("peer id conflict".to_owned()))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
pub async fn do_handshake_as_server(&mut self) -> Result<(), Error> {
|
||||
let rsp = self.wait_handshake_loop().await?;
|
||||
@@ -432,8 +470,20 @@ impl PeerConn {
|
||||
loss_rate: (f64::from(self.loss_rate_stats.load(Ordering::Relaxed)) / 100.0) as f32,
|
||||
is_client: self.is_client.unwrap_or_default(),
|
||||
network_name: info.network_name.clone(),
|
||||
is_closed: self.close_event_notifier.is_closed(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_peer_id(&mut self, peer_id: PeerId) {
|
||||
if self.info.is_some() {
|
||||
panic!("set_peer_id should only be called before handshake");
|
||||
}
|
||||
self.my_peer_id = peer_id;
|
||||
}
|
||||
|
||||
pub fn get_my_peer_id(&self) -> PeerId {
|
||||
self.my_peer_id
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PeerConn {
|
||||
|
||||
@@ -55,7 +55,7 @@ impl std::fmt::Debug for PingIntervalController {
|
||||
|
||||
impl PingIntervalController {
|
||||
fn new(throughput: Arc<Throughput>, loss_counter: Arc<AtomicU32>) -> Self {
|
||||
let last_throughput = *throughput;
|
||||
let last_throughput = (*throughput).clone();
|
||||
|
||||
Self {
|
||||
throughput,
|
||||
@@ -92,7 +92,7 @@ impl PingIntervalController {
|
||||
self.backoff_idx = 0;
|
||||
}
|
||||
|
||||
self.last_throughput = *self.throughput;
|
||||
self.last_throughput = (*self.throughput).clone();
|
||||
|
||||
if (self.logic_time - self.last_send_logic_time) < (1 << self.backoff_idx) {
|
||||
return false;
|
||||
|
||||
@@ -23,7 +23,7 @@ use crate::{
|
||||
compressor::{Compressor as _, DefaultCompressor},
|
||||
constants::EASYTIER_VERSION,
|
||||
error::Error,
|
||||
global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity},
|
||||
global_ctx::{ArcGlobalCtx, NetworkIdentity},
|
||||
stun::StunInfoCollectorTrait,
|
||||
PeerId,
|
||||
},
|
||||
@@ -142,8 +142,7 @@ pub struct PeerManager {
|
||||
|
||||
exit_nodes: Vec<Ipv4Addr>,
|
||||
|
||||
// conns that are directly connected (which are not hole punched)
|
||||
directly_connected_conn_map: Arc<DashMap<PeerId, DashSet<uuid::Uuid>>>,
|
||||
reserved_my_peer_id_map: DashMap<String, PeerId>,
|
||||
}
|
||||
|
||||
impl Debug for PeerManager {
|
||||
@@ -271,7 +270,7 @@ impl PeerManager {
|
||||
|
||||
exit_nodes,
|
||||
|
||||
directly_connected_conn_map: Arc::new(DashMap::new()),
|
||||
reserved_my_peer_id_map: DashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -315,8 +314,10 @@ impl PeerManager {
|
||||
pub async fn add_client_tunnel(
|
||||
&self,
|
||||
tunnel: Box<dyn Tunnel>,
|
||||
is_directly_connected: bool,
|
||||
) -> Result<(PeerId, PeerConnId), Error> {
|
||||
let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel);
|
||||
peer.set_is_hole_punched(!is_directly_connected);
|
||||
peer.do_handshake_as_client().await?;
|
||||
let conn_id = peer.get_conn_id();
|
||||
let peer_id = peer.get_peer_id();
|
||||
@@ -330,72 +331,12 @@ impl PeerManager {
|
||||
Ok((peer_id, conn_id))
|
||||
}
|
||||
|
||||
fn add_directly_connected_conn(&self, peer_id: PeerId, conn_id: uuid::Uuid) {
|
||||
let _ = self
|
||||
.directly_connected_conn_map
|
||||
.entry(peer_id)
|
||||
.or_insert_with(DashSet::new)
|
||||
.insert(conn_id);
|
||||
}
|
||||
|
||||
pub fn has_directly_connected_conn(&self, peer_id: PeerId) -> bool {
|
||||
self.directly_connected_conn_map
|
||||
.get(&peer_id)
|
||||
.map_or(false, |x| !x.is_empty())
|
||||
}
|
||||
|
||||
async fn start_peer_conn_close_event_handler(&self) {
|
||||
let dmap = self.directly_connected_conn_map.clone();
|
||||
let mut event_recv = self.global_ctx.subscribe();
|
||||
let peer_map = self.peers.clone();
|
||||
use tokio::sync::broadcast::error::RecvError;
|
||||
self.tasks.lock().await.spawn(async move {
|
||||
loop {
|
||||
match event_recv.recv().await {
|
||||
Err(RecvError::Closed) => {
|
||||
tracing::error!("peer conn close event handler exit");
|
||||
break;
|
||||
}
|
||||
Err(RecvError::Lagged(_)) => {
|
||||
tracing::warn!("peer conn close event handler lagged");
|
||||
event_recv = event_recv.resubscribe();
|
||||
let alive_conns = peer_map.get_alive_conns();
|
||||
for p in dmap.iter_mut() {
|
||||
p.retain(|x| alive_conns.contains_key(&(*p.key(), *x)));
|
||||
}
|
||||
dmap.retain(|_, v| !v.is_empty());
|
||||
}
|
||||
Ok(event) => {
|
||||
if let GlobalCtxEvent::PeerConnRemoved(info) = event {
|
||||
let mut need_remove = false;
|
||||
if let Some(set) = dmap.get_mut(&info.peer_id) {
|
||||
let conn_id = info.conn_id.parse().unwrap();
|
||||
let old = set.remove(&conn_id);
|
||||
tracing::info!(
|
||||
?old,
|
||||
?info,
|
||||
"try remove conn id from directly connected map"
|
||||
);
|
||||
need_remove = set.is_empty();
|
||||
}
|
||||
|
||||
if need_remove {
|
||||
dmap.remove(&info.peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn add_direct_tunnel(
|
||||
&self,
|
||||
t: Box<dyn Tunnel>,
|
||||
) -> Result<(PeerId, PeerConnId), Error> {
|
||||
let (peer_id, conn_id) = self.add_client_tunnel(t).await?;
|
||||
self.add_directly_connected_conn(peer_id, conn_id);
|
||||
Ok((peer_id, conn_id))
|
||||
if let Some(peer) = self.peers.get_peer_by_id(peer_id) {
|
||||
peer.has_directly_connected_conn()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
@@ -410,10 +351,10 @@ impl PeerManager {
|
||||
let t = ns
|
||||
.run_async(|| async move { connector.connect().await })
|
||||
.await?;
|
||||
self.add_direct_tunnel(t).await
|
||||
self.add_client_tunnel(t, true).await
|
||||
}
|
||||
|
||||
#[tracing::instrument]
|
||||
#[tracing::instrument(ret)]
|
||||
pub async fn add_tunnel_as_server(
|
||||
&self,
|
||||
tunnel: Box<dyn Tunnel>,
|
||||
@@ -421,24 +362,57 @@ impl PeerManager {
|
||||
) -> Result<(), Error> {
|
||||
tracing::info!("add tunnel as server start");
|
||||
let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel);
|
||||
peer.do_handshake_as_server().await?;
|
||||
if peer.get_network_identity().network_name
|
||||
== self.global_ctx.get_network_identity().network_name
|
||||
{
|
||||
let (peer_id, conn_id) = (peer.get_peer_id(), peer.get_conn_id());
|
||||
self.add_new_peer_conn(peer).await?;
|
||||
if is_directly_connected {
|
||||
self.add_directly_connected_conn(peer_id, conn_id);
|
||||
peer.do_handshake_as_server_ext(|peer, msg| {
|
||||
if msg.network_name
|
||||
== self.global_ctx.get_network_identity().network_name
|
||||
{
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if self.global_ctx.config.get_flags().private_mode {
|
||||
return Err(Error::SecretKeyError(
|
||||
"private mode is turned on, network identity not match".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let mut peer_id = self
|
||||
.foreign_network_manager
|
||||
.get_network_peer_id(&msg.network_name);
|
||||
if peer_id.is_none() {
|
||||
peer_id = Some(*self.reserved_my_peer_id_map.entry(msg.network_name.clone()).or_insert_with(|| {
|
||||
rand::random::<PeerId>()
|
||||
}).value());
|
||||
}
|
||||
peer.set_peer_id(peer_id.clone().unwrap());
|
||||
|
||||
tracing::info!(
|
||||
?peer_id,
|
||||
?msg.network_name,
|
||||
"handshake as server with foreign network, new peer id: {}, peer id in foreign manager: {:?}",
|
||||
peer.get_my_peer_id(), peer_id
|
||||
);
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await?;
|
||||
|
||||
let peer_network_name = peer.get_network_identity().network_name.clone();
|
||||
|
||||
if peer_network_name == self.global_ctx.get_network_identity().network_name {
|
||||
peer.set_is_hole_punched(!is_directly_connected);
|
||||
self.add_new_peer_conn(peer).await?;
|
||||
} else {
|
||||
self.foreign_network_manager.add_peer_conn(peer).await?;
|
||||
}
|
||||
|
||||
self.reserved_my_peer_id_map.remove(&peer_network_name);
|
||||
|
||||
tracing::info!("add tunnel as server done");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn try_handle_foreign_network_packet(
|
||||
packet: ZCPacket,
|
||||
mut packet: ZCPacket,
|
||||
my_peer_id: PeerId,
|
||||
peer_map: &PeerMap,
|
||||
foreign_network_mgr: &ForeignNetworkManager,
|
||||
@@ -455,6 +429,10 @@ impl PeerManager {
|
||||
let foreign_network_name = foreign_hdr.get_network_name(packet.payload());
|
||||
let foreign_peer_id = foreign_hdr.get_dst_peer_id();
|
||||
|
||||
let foreign_network_my_peer_id =
|
||||
foreign_network_mgr.get_network_peer_id(&foreign_network_name);
|
||||
|
||||
// NOTICE: the to peer id is modified by the src from foreign network my peer id to the origin my peer id
|
||||
if to_peer_id == my_peer_id {
|
||||
// packet sent from other peer to me, extract the inner packet and forward it
|
||||
if let Err(e) = foreign_network_mgr
|
||||
@@ -473,7 +451,27 @@ impl PeerManager {
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
} else if from_peer_id == my_peer_id {
|
||||
} else if Some(from_peer_id) == foreign_network_my_peer_id {
|
||||
// to_peer_id is my peer id for the foreign network, need to convert to the origin my_peer_id of dst
|
||||
let Some(to_peer_id) = peer_map
|
||||
.get_origin_my_peer_id(&foreign_network_name, to_peer_id)
|
||||
.await
|
||||
else {
|
||||
tracing::debug!(
|
||||
?foreign_network_name,
|
||||
?to_peer_id,
|
||||
"cannot find origin my peer id for foreign network."
|
||||
);
|
||||
return Err(packet);
|
||||
};
|
||||
|
||||
// modify the to_peer id from foreign network my peer id to the origin my peer id
|
||||
packet
|
||||
.mut_peer_manager_header()
|
||||
.unwrap()
|
||||
.to_peer_id
|
||||
.set(to_peer_id);
|
||||
|
||||
// packet is generated from foreign network mgr and should be forward to other peer
|
||||
if let Err(e) = peer_map
|
||||
.send_msg(packet, to_peer_id, NextHopPolicy::LeastHop)
|
||||
@@ -488,7 +486,7 @@ impl PeerManager {
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
// target is not me, forward it
|
||||
// target is not me, forward it. try get origin peer id
|
||||
Err(packet)
|
||||
}
|
||||
}
|
||||
@@ -709,6 +707,7 @@ impl PeerManager {
|
||||
last_update: Some(last_update.into()),
|
||||
version: 0,
|
||||
network_secret_digest: info.network_secret_digest.clone(),
|
||||
my_peer_id_for_this_network: info.my_peer_id_for_this_network,
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -955,11 +954,9 @@ impl PeerManager {
|
||||
|
||||
async fn run_clean_peer_without_conn_routine(&self) {
|
||||
let peer_map = self.peers.clone();
|
||||
let dmap = self.directly_connected_conn_map.clone();
|
||||
self.tasks.lock().await.spawn(async move {
|
||||
loop {
|
||||
peer_map.clean_peer_without_conn().await;
|
||||
dmap.retain(|p, v| peer_map.has_peer(*p) && !v.is_empty());
|
||||
tokio::time::sleep(std::time::Duration::from_secs(3)).await;
|
||||
}
|
||||
});
|
||||
@@ -976,8 +973,6 @@ impl PeerManager {
|
||||
}
|
||||
|
||||
pub async fn run(&self) -> Result<(), Error> {
|
||||
self.start_peer_conn_close_event_handler().await;
|
||||
|
||||
match &self.route_algo_inst {
|
||||
RouteAlgoInst::Ospf(route) => self.add_route(route.clone()).await,
|
||||
RouteAlgoInst::None => {}
|
||||
@@ -1036,9 +1031,16 @@ impl PeerManager {
|
||||
.unwrap_or_default(),
|
||||
proxy_cidrs: self
|
||||
.global_ctx
|
||||
.config
|
||||
.get_proxy_cidrs()
|
||||
.into_iter()
|
||||
.map(|x| x.to_string())
|
||||
.map(|x| {
|
||||
if x.mapped_cidr.is_none() {
|
||||
x.cidr.to_string()
|
||||
} else {
|
||||
format!("{}->{}", x.cidr, x.mapped_cidr.unwrap())
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
hostname: self.global_ctx.get_hostname(),
|
||||
stun_info: Some(self.global_ctx.get_stun_info_collector().get_stun_info()),
|
||||
@@ -1063,10 +1065,20 @@ impl PeerManager {
|
||||
}
|
||||
|
||||
pub fn get_directly_connections(&self, peer_id: PeerId) -> DashSet<uuid::Uuid> {
|
||||
self.directly_connected_conn_map
|
||||
.get(&peer_id)
|
||||
.map(|x| x.clone())
|
||||
.unwrap_or_default()
|
||||
if let Some(peer) = self.peers.get_peer_by_id(peer_id) {
|
||||
return peer.get_directly_connections();
|
||||
}
|
||||
|
||||
DashSet::new()
|
||||
}
|
||||
|
||||
pub async fn clear_resources(&self) {
|
||||
let mut peer_pipeline = self.peer_packet_process_pipeline.write().await;
|
||||
peer_pipeline.clear();
|
||||
let mut nic_pipeline = self.nic_packet_process_pipeline.write().await;
|
||||
nic_pipeline.clear();
|
||||
|
||||
self.peer_rpc_mgr.rpc_server().registry().unregister_all();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1078,7 +1090,8 @@ mod tests {
|
||||
use crate::{
|
||||
common::{config::Flags, global_ctx::tests::get_mock_global_ctx},
|
||||
connector::{
|
||||
create_connector_by_url, udp_hole_punch::tests::create_mock_peer_manager_with_mock_stun,
|
||||
create_connector_by_url, direct::PeerManagerForDirectConnector,
|
||||
udp_hole_punch::tests::create_mock_peer_manager_with_mock_stun,
|
||||
},
|
||||
instance::listeners::get_listener_by_url,
|
||||
peers::{
|
||||
@@ -1089,7 +1102,12 @@ mod tests {
|
||||
tests::{connect_peer_manager, wait_route_appear, wait_route_appear_with_cost},
|
||||
},
|
||||
proto::common::{CompressionAlgoPb, NatType, PeerFeatureFlag},
|
||||
tunnel::{common::tests::wait_for_condition, TunnelConnector, TunnelListener},
|
||||
tunnel::{
|
||||
common::tests::wait_for_condition,
|
||||
filter::{tests::DropSendTunnelFilter, TunnelWithFilter},
|
||||
ring::create_ring_tunnel_pair,
|
||||
TunnelConnector, TunnelListener,
|
||||
},
|
||||
};
|
||||
|
||||
use super::PeerManager;
|
||||
@@ -1140,7 +1158,7 @@ mod tests {
|
||||
});
|
||||
|
||||
server_mgr
|
||||
.add_client_tunnel(server.accept().await.unwrap())
|
||||
.add_client_tunnel(server.accept().await.unwrap(), false)
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
@@ -1268,6 +1286,12 @@ mod tests {
|
||||
let peer_mgr_d = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
let peer_mgr_e = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
|
||||
println!("peer_mgr_a: {}", peer_mgr_a.my_peer_id);
|
||||
println!("peer_mgr_b: {}", peer_mgr_b.my_peer_id);
|
||||
println!("peer_mgr_c: {}", peer_mgr_c.my_peer_id);
|
||||
println!("peer_mgr_d: {}", peer_mgr_d.my_peer_id);
|
||||
println!("peer_mgr_e: {}", peer_mgr_e.my_peer_id);
|
||||
|
||||
connect_peer_manager(peer_mgr_a.clone(), peer_mgr_b.clone()).await;
|
||||
connect_peer_manager(peer_mgr_b.clone(), peer_mgr_c.clone()).await;
|
||||
|
||||
@@ -1323,4 +1347,36 @@ mod tests {
|
||||
.await;
|
||||
assert_eq!(ret, Some(peer_mgr_b.my_peer_id));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_client_inbound_blackhole() {
|
||||
let peer_mgr_a = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
let peer_mgr_b = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
|
||||
|
||||
// a is client, b is server
|
||||
|
||||
let (a_ring, b_ring) = create_ring_tunnel_pair();
|
||||
let a_ring = Box::new(TunnelWithFilter::new(
|
||||
a_ring,
|
||||
DropSendTunnelFilter::new(2, 50000),
|
||||
));
|
||||
|
||||
let a_mgr_copy = peer_mgr_a.clone();
|
||||
tokio::spawn(async move {
|
||||
a_mgr_copy.add_client_tunnel(a_ring, false).await.unwrap();
|
||||
});
|
||||
let b_mgr_copy = peer_mgr_b.clone();
|
||||
tokio::spawn(async move {
|
||||
b_mgr_copy.add_tunnel_as_server(b_ring, true).await.unwrap();
|
||||
});
|
||||
|
||||
wait_for_condition(
|
||||
|| async {
|
||||
let peers = peer_mgr_a.list_peers().await;
|
||||
peers.is_empty()
|
||||
},
|
||||
Duration::from_secs(10),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ use crate::{
|
||||
global_ctx::{ArcGlobalCtx, GlobalCtxEvent, NetworkIdentity},
|
||||
PeerId,
|
||||
},
|
||||
proto::{cli::PeerConnInfo, common::PeerFeatureFlag},
|
||||
proto::{cli::PeerConnInfo, peer_rpc::RoutePeerInfo},
|
||||
tunnel::{packet_def::ZCPacket, TunnelError},
|
||||
};
|
||||
|
||||
@@ -87,7 +87,7 @@ impl PeerMap {
|
||||
});
|
||||
}
|
||||
|
||||
fn get_peer_by_id(&self, peer_id: PeerId) -> Option<Arc<Peer>> {
|
||||
pub fn get_peer_by_id(&self, peer_id: PeerId) -> Option<Arc<Peer>> {
|
||||
self.peer_map.get(&peer_id).map(|v| v.clone())
|
||||
}
|
||||
|
||||
@@ -194,12 +194,27 @@ impl PeerMap {
|
||||
None
|
||||
}
|
||||
|
||||
pub async fn get_peer_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag> {
|
||||
pub async fn get_route_peer_info(&self, peer_id: PeerId) -> Option<RoutePeerInfo> {
|
||||
for route in self.routes.read().await.iter() {
|
||||
let feature_flag = route.get_feature_flag(peer_id).await;
|
||||
if feature_flag.is_some() {
|
||||
return feature_flag;
|
||||
};
|
||||
if let Some(info) = route.get_peer_info(peer_id).await {
|
||||
return Some(info);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub async fn get_origin_my_peer_id(
|
||||
&self,
|
||||
network_name: &str,
|
||||
foreign_my_peer_id: PeerId,
|
||||
) -> Option<PeerId> {
|
||||
for route in self.routes.read().await.iter() {
|
||||
let origin_peer_id = route
|
||||
.get_origin_my_peer_id(network_name, foreign_my_peer_id)
|
||||
.await;
|
||||
if origin_peer_id.is_some() {
|
||||
return origin_peer_id;
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::{
|
||||
collections::BTreeSet,
|
||||
fmt::Debug,
|
||||
hash::RandomState,
|
||||
net::Ipv4Addr,
|
||||
sync::{
|
||||
atomic::{AtomicBool, AtomicU32, Ordering},
|
||||
@@ -13,9 +12,10 @@ use std::{
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use dashmap::DashMap;
|
||||
use petgraph::{
|
||||
algo::{all_simple_paths, astar, dijkstra},
|
||||
graph::NodeIndex,
|
||||
Directed, Graph,
|
||||
algo::dijkstra,
|
||||
graph::{Graph, NodeIndex},
|
||||
visit::{EdgeRef, IntoNodeReferences},
|
||||
Directed,
|
||||
};
|
||||
use prost::Message;
|
||||
use prost_reflect::{DynamicMessage, ReflectMessage};
|
||||
@@ -33,7 +33,7 @@ use crate::{
|
||||
},
|
||||
peers::route_trait::{Route, RouteInterfaceBox},
|
||||
proto::{
|
||||
common::{Ipv4Inet, NatType, PeerFeatureFlag, StunInfo},
|
||||
common::{Ipv4Inet, NatType, StunInfo},
|
||||
peer_rpc::{
|
||||
route_foreign_network_infos, ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey,
|
||||
OspfRouteRpc, OspfRouteRpcClientFactory, OspfRouteRpcServer, PeerIdVersion,
|
||||
@@ -49,6 +49,7 @@ use crate::{
|
||||
};
|
||||
|
||||
use super::{
|
||||
graph_algo::dijkstra_with_first_hop,
|
||||
peer_rpc::PeerRpcManager,
|
||||
route_trait::{
|
||||
DefaultRouteCostCalculator, ForeignNetworkRouteInfoMap, NextHopPolicy, RouteCostCalculator,
|
||||
@@ -60,7 +61,8 @@ use super::{
|
||||
static SERVICE_ID: u32 = 7;
|
||||
static UPDATE_PEER_INFO_PERIOD: Duration = Duration::from_secs(3600);
|
||||
static REMOVE_DEAD_PEER_INFO_AFTER: Duration = Duration::from_secs(3660);
|
||||
static AVOID_RELAY_COST: i32 = i32::MAX / 512;
|
||||
// the cost (latency between two peers) is i32, i32::MAX is large enough.
|
||||
static AVOID_RELAY_COST: usize = i32::MAX as usize;
|
||||
|
||||
type Version = u32;
|
||||
|
||||
@@ -80,14 +82,13 @@ impl AtomicVersion {
|
||||
self.0.store(version, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
fn inc(&self) {
|
||||
self.0.fetch_add(1, Ordering::Relaxed);
|
||||
fn inc(&self) -> Version {
|
||||
self.0.fetch_add(1, Ordering::Relaxed) + 1
|
||||
}
|
||||
|
||||
fn set_if_larger(&self, version: Version) {
|
||||
if self.get() < version {
|
||||
self.set(version);
|
||||
}
|
||||
fn set_if_larger(&self, version: Version) -> bool {
|
||||
// return true if the version is set.
|
||||
self.0.fetch_max(version, Ordering::Relaxed) < version
|
||||
}
|
||||
}
|
||||
|
||||
@@ -123,6 +124,7 @@ impl RoutePeerInfo {
|
||||
feature_flag: None,
|
||||
peer_route_id: 0,
|
||||
network_length: 24,
|
||||
quic_port: None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,10 +140,12 @@ impl RoutePeerInfo {
|
||||
cost: 0,
|
||||
ipv4_addr: global_ctx.get_ipv4().map(|x| x.address().into()),
|
||||
proxy_cidrs: global_ctx
|
||||
.config
|
||||
.get_proxy_cidrs()
|
||||
.iter()
|
||||
.map(|x| x.mapped_cidr.unwrap_or(x.cidr))
|
||||
.chain(global_ctx.get_vpn_portal_cidr())
|
||||
.map(|x| x.to_string())
|
||||
.chain(global_ctx.get_vpn_portal_cidr().map(|x| x.to_string()))
|
||||
.collect(),
|
||||
hostname: Some(global_ctx.get_hostname()),
|
||||
udp_stun_info: global_ctx
|
||||
@@ -159,6 +163,8 @@ impl RoutePeerInfo {
|
||||
.get_ipv4()
|
||||
.map(|x| x.network_length() as u32)
|
||||
.unwrap_or(24),
|
||||
|
||||
quic_port: global_ctx.get_quic_proxy_port().map(|x| x as u32),
|
||||
};
|
||||
|
||||
let need_update_periodically = if let Ok(Ok(d)) =
|
||||
@@ -283,13 +289,25 @@ impl RouteConnBitmap {
|
||||
type Error = SyncRouteInfoError;
|
||||
|
||||
// constructed with all infos synced from all peers.
|
||||
#[derive(Debug)]
|
||||
struct SyncedRouteInfo {
|
||||
peer_infos: DashMap<PeerId, RoutePeerInfo>,
|
||||
// prost doesn't support unknown fields, so we use DynamicMessage to store raw infos and progate them to other peers.
|
||||
raw_peer_infos: DashMap<PeerId, DynamicMessage>,
|
||||
conn_map: DashMap<PeerId, (BTreeSet<PeerId>, AtomicVersion)>,
|
||||
foreign_network: DashMap<ForeignNetworkRouteInfoKey, ForeignNetworkRouteInfoEntry>,
|
||||
|
||||
version: AtomicVersion,
|
||||
}
|
||||
|
||||
impl Debug for SyncedRouteInfo {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("SyncedRouteInfo")
|
||||
.field("peer_infos", &self.peer_infos)
|
||||
.field("conn_map", &self.conn_map)
|
||||
.field("foreign_network", &self.foreign_network)
|
||||
.field("version", &self.version.get())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl SyncedRouteInfo {
|
||||
@@ -305,17 +323,24 @@ impl SyncedRouteInfo {
|
||||
self.raw_peer_infos.remove(&peer_id);
|
||||
self.conn_map.remove(&peer_id);
|
||||
self.foreign_network.retain(|k, _| k.peer_id != peer_id);
|
||||
self.version.inc();
|
||||
}
|
||||
|
||||
fn fill_empty_peer_info(&self, peer_ids: &BTreeSet<PeerId>) {
|
||||
let mut need_inc_version = false;
|
||||
for peer_id in peer_ids {
|
||||
self.peer_infos
|
||||
.entry(*peer_id)
|
||||
.or_insert_with(|| RoutePeerInfo::new());
|
||||
self.peer_infos.entry(*peer_id).or_insert_with(|| {
|
||||
need_inc_version = true;
|
||||
RoutePeerInfo::new()
|
||||
});
|
||||
|
||||
self.conn_map
|
||||
.entry(*peer_id)
|
||||
.or_insert_with(|| (BTreeSet::new(), AtomicVersion::new()));
|
||||
self.conn_map.entry(*peer_id).or_insert_with(|| {
|
||||
need_inc_version = true;
|
||||
(BTreeSet::new(), AtomicVersion::new())
|
||||
});
|
||||
}
|
||||
if need_inc_version {
|
||||
self.version.inc();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -377,6 +402,7 @@ impl SyncedRouteInfo {
|
||||
peer_infos: &Vec<RoutePeerInfo>,
|
||||
raw_peer_infos: &Vec<DynamicMessage>,
|
||||
) -> Result<(), Error> {
|
||||
let mut need_inc_version = false;
|
||||
for (idx, route_info) in peer_infos.iter().enumerate() {
|
||||
let mut route_info = route_info.clone();
|
||||
let raw_route_info = &raw_peer_infos[idx];
|
||||
@@ -410,22 +436,28 @@ impl SyncedRouteInfo {
|
||||
self.raw_peer_infos
|
||||
.insert(route_info.peer_id, raw_route_info.clone());
|
||||
*old_entry = route_info.clone();
|
||||
need_inc_version = true;
|
||||
}
|
||||
})
|
||||
.or_insert_with(|| {
|
||||
need_inc_version = true;
|
||||
self.raw_peer_infos
|
||||
.insert(route_info.peer_id, raw_route_info.clone());
|
||||
route_info.clone()
|
||||
});
|
||||
}
|
||||
if need_inc_version {
|
||||
self.version.inc();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_conn_map(&self, conn_bitmap: &RouteConnBitmap) {
|
||||
self.fill_empty_peer_info(&conn_bitmap.peer_ids.iter().map(|x| x.0).collect());
|
||||
|
||||
let mut need_inc_version = false;
|
||||
|
||||
for (peer_idx, (peer_id, version)) in conn_bitmap.peer_ids.iter().enumerate() {
|
||||
assert!(self.peer_infos.contains_key(peer_id));
|
||||
let connceted_peers = conn_bitmap.get_connected_peers(peer_idx);
|
||||
self.fill_empty_peer_info(&connceted_peers);
|
||||
|
||||
@@ -433,17 +465,19 @@ impl SyncedRouteInfo {
|
||||
.entry(*peer_id)
|
||||
.and_modify(|(old_conn_bitmap, old_version)| {
|
||||
if *version > old_version.get() {
|
||||
*old_conn_bitmap = conn_bitmap.get_connected_peers(peer_idx);
|
||||
*old_conn_bitmap = connceted_peers.clone();
|
||||
need_inc_version = true;
|
||||
old_version.set(*version);
|
||||
}
|
||||
})
|
||||
.or_insert_with(|| {
|
||||
(
|
||||
conn_bitmap.get_connected_peers(peer_idx),
|
||||
version.clone().into(),
|
||||
)
|
||||
need_inc_version = true;
|
||||
(connceted_peers, version.clone().into())
|
||||
});
|
||||
}
|
||||
if need_inc_version {
|
||||
self.version.inc();
|
||||
}
|
||||
}
|
||||
|
||||
fn update_foreign_network(&self, foreign_network: &RouteForeignNetworkInfos) {
|
||||
@@ -483,7 +517,12 @@ impl SyncedRouteInfo {
|
||||
let old_version = old.version;
|
||||
*old = new;
|
||||
|
||||
new_version != old_version
|
||||
if new_version != old_version {
|
||||
self.version.inc();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn update_my_conn_info(&self, my_peer_id: PeerId, connected_peers: BTreeSet<PeerId>) -> bool {
|
||||
@@ -499,6 +538,7 @@ impl SyncedRouteInfo {
|
||||
} else {
|
||||
let _ = std::mem::replace(&mut my_conn_info.value_mut().0, connected_peers);
|
||||
my_conn_info.value().1.inc();
|
||||
self.version.inc();
|
||||
true
|
||||
}
|
||||
}
|
||||
@@ -557,6 +597,10 @@ impl SyncedRouteInfo {
|
||||
updated = true;
|
||||
}
|
||||
|
||||
if updated {
|
||||
self.version.inc();
|
||||
}
|
||||
|
||||
updated
|
||||
}
|
||||
|
||||
@@ -573,13 +617,14 @@ impl SyncedRouteInfo {
|
||||
}
|
||||
}
|
||||
|
||||
type PeerGraph = Graph<PeerId, i32, Directed>;
|
||||
type PeerGraph = Graph<PeerId, usize, Directed>;
|
||||
type PeerIdToNodexIdxMap = DashMap<PeerId, NodeIndex>;
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct NextHopInfo {
|
||||
next_hop_peer_id: PeerId,
|
||||
path_latency: i32,
|
||||
path_len: usize, // path includes src and dst.
|
||||
version: Version,
|
||||
}
|
||||
// dst_peer_id -> (next_hop_peer_id, cost, path_len)
|
||||
type NextHopMap = DashMap<PeerId, NextHopInfo>;
|
||||
@@ -591,6 +636,7 @@ struct RouteTable {
|
||||
next_hop_map: NextHopMap,
|
||||
ipv4_peer_id_map: DashMap<Ipv4Addr, PeerId>,
|
||||
cidr_peer_id_map: DashMap<cidr::IpCidr, PeerId>,
|
||||
next_hop_map_version: AtomicVersion,
|
||||
}
|
||||
|
||||
impl RouteTable {
|
||||
@@ -600,15 +646,23 @@ impl RouteTable {
|
||||
next_hop_map: DashMap::new(),
|
||||
ipv4_peer_id_map: DashMap::new(),
|
||||
cidr_peer_id_map: DashMap::new(),
|
||||
next_hop_map_version: AtomicVersion::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_next_hop(&self, dst_peer_id: PeerId) -> Option<NextHopInfo> {
|
||||
self.next_hop_map.get(&dst_peer_id).map(|x| *x)
|
||||
let cur_version = self.next_hop_map_version.get();
|
||||
self.next_hop_map.get(&dst_peer_id).and_then(|x| {
|
||||
if x.version >= cur_version {
|
||||
Some(*x)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn peer_reachable(&self, peer_id: PeerId) -> bool {
|
||||
self.next_hop_map.contains_key(&peer_id)
|
||||
self.get_next_hop(peer_id).is_some()
|
||||
}
|
||||
|
||||
fn get_nat_type(&self, peer_id: PeerId) -> Option<NatType> {
|
||||
@@ -617,158 +671,16 @@ impl RouteTable {
|
||||
.map(|x| NatType::try_from(x.udp_stun_info as i32).unwrap_or_default())
|
||||
}
|
||||
|
||||
// return graph and start node index (node of my peer id).
|
||||
fn build_peer_graph_from_synced_info<T: RouteCostCalculatorInterface>(
|
||||
peers: Vec<PeerId>,
|
||||
synced_info: &SyncedRouteInfo,
|
||||
cost_calc: &mut T,
|
||||
) -> (PeerGraph, PeerIdToNodexIdxMap) {
|
||||
let mut graph: PeerGraph = Graph::new();
|
||||
let peer_id_to_node_index = PeerIdToNodexIdxMap::new();
|
||||
for peer_id in peers.iter() {
|
||||
peer_id_to_node_index.insert(*peer_id, graph.add_node(*peer_id));
|
||||
}
|
||||
|
||||
for peer_id in peers.iter() {
|
||||
let connected_peers = synced_info
|
||||
.get_connected_peers(*peer_id)
|
||||
.unwrap_or(BTreeSet::new());
|
||||
|
||||
// if avoid relay, just set all outgoing edges to a large value: AVOID_RELAY_COST.
|
||||
let peer_avoid_relay_data = synced_info.get_avoid_relay_data(*peer_id);
|
||||
|
||||
for dst_peer_id in connected_peers.iter() {
|
||||
let Some(dst_idx) = peer_id_to_node_index.get(dst_peer_id) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
graph.add_edge(
|
||||
*peer_id_to_node_index.get(&peer_id).unwrap(),
|
||||
*dst_idx,
|
||||
if peer_avoid_relay_data {
|
||||
AVOID_RELAY_COST
|
||||
} else {
|
||||
cost_calc.calculate_cost(*peer_id, *dst_peer_id)
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
(graph, peer_id_to_node_index)
|
||||
}
|
||||
|
||||
fn gen_next_hop_map_with_least_hop<T: RouteCostCalculatorInterface>(
|
||||
my_peer_id: PeerId,
|
||||
graph: &PeerGraph,
|
||||
idx_map: &PeerIdToNodexIdxMap,
|
||||
cost_calc: &mut T,
|
||||
) -> NextHopMap {
|
||||
let res = dijkstra(&graph, *idx_map.get(&my_peer_id).unwrap(), None, |_| 1);
|
||||
let next_hop_map = NextHopMap::new();
|
||||
for (node_idx, cost) in res.iter() {
|
||||
if *cost == 0 {
|
||||
continue;
|
||||
}
|
||||
let mut all_paths = all_simple_paths::<Vec<_>, _, RandomState>(
|
||||
graph,
|
||||
*idx_map.get(&my_peer_id).unwrap(),
|
||||
*node_idx,
|
||||
*cost - 1,
|
||||
Some(*cost + 1), // considering having avoid relay, the max cost could be a bit larger.
|
||||
)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
assert!(!all_paths.is_empty());
|
||||
all_paths.sort_by(|a, b| a.len().cmp(&b.len()));
|
||||
|
||||
// find a path with least cost.
|
||||
let mut min_cost = i32::MAX;
|
||||
let mut min_path_len = usize::MAX;
|
||||
let mut min_path = Vec::new();
|
||||
for path in all_paths.iter() {
|
||||
if min_path_len < path.len() && min_cost < AVOID_RELAY_COST {
|
||||
// the min path does not contain avoid relay node.
|
||||
break;
|
||||
}
|
||||
|
||||
let mut cost = 0;
|
||||
for i in 0..path.len() - 1 {
|
||||
let src_peer_id = *graph.node_weight(path[i]).unwrap();
|
||||
let dst_peer_id = *graph.node_weight(path[i + 1]).unwrap();
|
||||
let edge_weight = *graph
|
||||
.edge_weight(graph.find_edge(path[i], path[i + 1]).unwrap())
|
||||
.unwrap();
|
||||
if edge_weight != 1 {
|
||||
// means avoid relay.
|
||||
cost += edge_weight;
|
||||
} else {
|
||||
cost += cost_calc.calculate_cost(src_peer_id, dst_peer_id);
|
||||
}
|
||||
}
|
||||
|
||||
if cost <= min_cost {
|
||||
min_cost = cost;
|
||||
min_path = path.clone();
|
||||
min_path_len = path.len();
|
||||
}
|
||||
}
|
||||
next_hop_map.insert(
|
||||
*graph.node_weight(*node_idx).unwrap(),
|
||||
NextHopInfo {
|
||||
next_hop_peer_id: *graph.node_weight(min_path[1]).unwrap(),
|
||||
path_latency: min_cost,
|
||||
path_len: min_path_len,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
next_hop_map
|
||||
}
|
||||
|
||||
fn gen_next_hop_map_with_least_cost(
|
||||
my_peer_id: PeerId,
|
||||
graph: &PeerGraph,
|
||||
idx_map: &PeerIdToNodexIdxMap,
|
||||
) -> NextHopMap {
|
||||
let next_hop_map = NextHopMap::new();
|
||||
for item in idx_map.iter() {
|
||||
if *item.key() == my_peer_id {
|
||||
continue;
|
||||
}
|
||||
|
||||
let dst_peer_node_idx = *item.value();
|
||||
|
||||
let Some((cost, path)) = astar::astar(
|
||||
graph,
|
||||
*idx_map.get(&my_peer_id).unwrap(),
|
||||
|node_idx| node_idx == dst_peer_node_idx,
|
||||
|e| *e.weight(),
|
||||
|_| 0,
|
||||
) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
next_hop_map.insert(
|
||||
*item.key(),
|
||||
NextHopInfo {
|
||||
next_hop_peer_id: *graph.node_weight(path[1]).unwrap(),
|
||||
path_latency: cost,
|
||||
path_len: path.len(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
next_hop_map
|
||||
}
|
||||
|
||||
fn build_from_synced_info<T: RouteCostCalculatorInterface>(
|
||||
&self,
|
||||
my_peer_id: PeerId,
|
||||
synced_info: &SyncedRouteInfo,
|
||||
policy: NextHopPolicy,
|
||||
mut cost_calc: T,
|
||||
) {
|
||||
// build peer_infos
|
||||
self.peer_infos.clear();
|
||||
cost_calc: &T,
|
||||
) -> (PeerGraph, NodeIndex) {
|
||||
let mut graph: PeerGraph = PeerGraph::new();
|
||||
|
||||
let mut start_node_idx = None;
|
||||
let peer_id_to_node_index: PeerIdToNodexIdxMap = DashMap::new();
|
||||
for item in synced_info.peer_infos.iter() {
|
||||
let peer_id = item.key();
|
||||
let info = item.value();
|
||||
@@ -777,57 +689,203 @@ impl RouteTable {
|
||||
continue;
|
||||
}
|
||||
|
||||
self.peer_infos.insert(*peer_id, info.clone());
|
||||
let node_idx = graph.add_node(*peer_id);
|
||||
|
||||
peer_id_to_node_index.insert(*peer_id, node_idx);
|
||||
if *peer_id == my_peer_id {
|
||||
start_node_idx = Some(node_idx);
|
||||
}
|
||||
}
|
||||
|
||||
if self.peer_infos.is_empty() {
|
||||
if start_node_idx.is_none() {
|
||||
return (graph, NodeIndex::end());
|
||||
}
|
||||
|
||||
for item in peer_id_to_node_index.iter() {
|
||||
let src_peer_id = item.key();
|
||||
let src_node_idx = item.value();
|
||||
let connected_peers = synced_info
|
||||
.get_connected_peers(*src_peer_id)
|
||||
.unwrap_or(BTreeSet::new());
|
||||
|
||||
// if avoid relay, just set all outgoing edges to a large value: AVOID_RELAY_COST.
|
||||
let peer_avoid_relay_data = synced_info.get_avoid_relay_data(*src_peer_id);
|
||||
|
||||
for dst_peer_id in connected_peers.iter() {
|
||||
let Some(dst_node_idx) = peer_id_to_node_index.get(dst_peer_id) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
let mut cost = cost_calc.calculate_cost(*src_peer_id, *dst_peer_id) as usize;
|
||||
if peer_avoid_relay_data {
|
||||
cost += AVOID_RELAY_COST;
|
||||
}
|
||||
|
||||
graph.add_edge(*src_node_idx, *dst_node_idx, cost);
|
||||
}
|
||||
}
|
||||
|
||||
(graph, start_node_idx.unwrap())
|
||||
}
|
||||
|
||||
fn clean_expired_route_info(&self) {
|
||||
let cur_version = self.next_hop_map_version.get();
|
||||
self.next_hop_map.retain(|_, v| {
|
||||
// remove next hop map for peers we cannot reach.
|
||||
v.version >= cur_version
|
||||
});
|
||||
self.peer_infos.retain(|k, _| {
|
||||
// remove peer info for peers we cannot reach.
|
||||
self.next_hop_map.contains_key(k)
|
||||
});
|
||||
self.ipv4_peer_id_map.retain(|_, v| {
|
||||
// remove ipv4 map for peers we cannot reach.
|
||||
self.next_hop_map.contains_key(v)
|
||||
});
|
||||
self.cidr_peer_id_map.retain(|_, v| {
|
||||
// remove cidr map for peers we cannot reach.
|
||||
self.next_hop_map.contains_key(v)
|
||||
});
|
||||
}
|
||||
|
||||
fn gen_next_hop_map_with_least_hop(
|
||||
&self,
|
||||
graph: &PeerGraph,
|
||||
start_node: &NodeIndex,
|
||||
version: Version,
|
||||
) {
|
||||
let normalize_edge_cost = |e: petgraph::graph::EdgeReference<usize>| {
|
||||
if *e.weight() >= AVOID_RELAY_COST {
|
||||
AVOID_RELAY_COST + 1
|
||||
} else {
|
||||
1
|
||||
}
|
||||
};
|
||||
// Step 1: 第一次 Dijkstra - 计算最短跳数
|
||||
let path_len_map = dijkstra(&graph, *start_node, None, normalize_edge_cost);
|
||||
|
||||
// Step 2: 构建最短跳数子图(只保留属于最短路径和 AVOID RELAY 的边)
|
||||
let mut subgraph: PeerGraph = PeerGraph::new();
|
||||
let mut start_node_idx = None;
|
||||
for (node_idx, peer_id) in graph.node_references() {
|
||||
let new_node_idx = subgraph.add_node(*peer_id);
|
||||
if node_idx == *start_node {
|
||||
start_node_idx = Some(new_node_idx);
|
||||
}
|
||||
}
|
||||
|
||||
for edge in graph.edge_references() {
|
||||
let (src, tgt) = graph.edge_endpoints(edge.id()).unwrap();
|
||||
let Some(src_path_len) = path_len_map.get(&src) else {
|
||||
continue;
|
||||
};
|
||||
let Some(tgt_path_len) = path_len_map.get(&tgt) else {
|
||||
continue;
|
||||
};
|
||||
if *src_path_len + normalize_edge_cost(edge) == *tgt_path_len {
|
||||
subgraph.add_edge(src, tgt, *edge.weight());
|
||||
}
|
||||
}
|
||||
|
||||
// Step 3: 第二次 Dijkstra - 在子图上找代价最小的路径
|
||||
self.gen_next_hop_map_with_least_cost(&subgraph, &start_node_idx.clone().unwrap(), version);
|
||||
}
|
||||
|
||||
fn gen_next_hop_map_with_least_cost(
|
||||
&self,
|
||||
graph: &PeerGraph,
|
||||
start_node: &NodeIndex,
|
||||
version: Version,
|
||||
) {
|
||||
let (costs, next_hops) = dijkstra_with_first_hop(&graph, *start_node, |e| *e.weight());
|
||||
|
||||
for (dst, (next_hop, path_len)) in next_hops.iter() {
|
||||
let info = NextHopInfo {
|
||||
next_hop_peer_id: *graph.node_weight(*next_hop).unwrap(),
|
||||
path_latency: (*costs.get(dst).unwrap() % AVOID_RELAY_COST) as i32,
|
||||
path_len: *path_len as usize,
|
||||
version,
|
||||
};
|
||||
let dst_peer_id = *graph.node_weight(*dst).unwrap();
|
||||
self.next_hop_map
|
||||
.entry(dst_peer_id)
|
||||
.and_modify(|x| {
|
||||
if x.version < version {
|
||||
*x = info;
|
||||
}
|
||||
})
|
||||
.or_insert(info);
|
||||
}
|
||||
|
||||
self.next_hop_map_version.set_if_larger(version);
|
||||
}
|
||||
|
||||
fn build_from_synced_info<T: RouteCostCalculatorInterface>(
|
||||
&self,
|
||||
my_peer_id: PeerId,
|
||||
synced_info: &SyncedRouteInfo,
|
||||
policy: NextHopPolicy,
|
||||
cost_calc: &T,
|
||||
) {
|
||||
let version = synced_info.version.get();
|
||||
|
||||
// build next hop map
|
||||
let (graph, start_node) =
|
||||
Self::build_peer_graph_from_synced_info(my_peer_id, &synced_info, cost_calc);
|
||||
|
||||
if graph.node_count() == 0 {
|
||||
tracing::warn!("no peer in graph, cannot build next hop map");
|
||||
return;
|
||||
}
|
||||
|
||||
// build next hop map
|
||||
self.next_hop_map.clear();
|
||||
self.next_hop_map.insert(
|
||||
my_peer_id,
|
||||
NextHopInfo {
|
||||
next_hop_peer_id: my_peer_id,
|
||||
path_latency: 0,
|
||||
path_len: 1,
|
||||
},
|
||||
);
|
||||
let (graph, idx_map) = Self::build_peer_graph_from_synced_info(
|
||||
self.peer_infos.iter().map(|x| *x.key()).collect(),
|
||||
&synced_info,
|
||||
&mut cost_calc,
|
||||
);
|
||||
let next_hop_map = if matches!(policy, NextHopPolicy::LeastHop) {
|
||||
Self::gen_next_hop_map_with_least_hop(my_peer_id, &graph, &idx_map, &mut cost_calc)
|
||||
if matches!(policy, NextHopPolicy::LeastHop) {
|
||||
self.gen_next_hop_map_with_least_hop(&graph, &start_node, version);
|
||||
} else {
|
||||
Self::gen_next_hop_map_with_least_cost(my_peer_id, &graph, &idx_map)
|
||||
self.gen_next_hop_map_with_least_cost(&graph, &start_node, version);
|
||||
};
|
||||
for item in next_hop_map.iter() {
|
||||
self.next_hop_map.insert(*item.key(), *item.value());
|
||||
}
|
||||
// build graph
|
||||
|
||||
// build ipv4_peer_id_map, cidr_peer_id_map
|
||||
self.ipv4_peer_id_map.clear();
|
||||
self.cidr_peer_id_map.clear();
|
||||
for item in self.peer_infos.iter() {
|
||||
// only set ipv4 map for peers we can reach.
|
||||
if !self.next_hop_map.contains_key(item.key()) {
|
||||
// build peer_infos, ipv4_peer_id_map, cidr_peer_id_map
|
||||
// only set map for peers we can reach.
|
||||
for item in self.next_hop_map.iter() {
|
||||
if item.version < version {
|
||||
// skip if the next hop entry is outdated. (peer is unreachable)
|
||||
continue;
|
||||
}
|
||||
|
||||
let peer_id = item.key();
|
||||
let info = item.value();
|
||||
let Some(info) = synced_info.peer_infos.get(peer_id) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
self.peer_infos.insert(*peer_id, info.clone());
|
||||
|
||||
let is_new_peer_better = |old_peer_id: PeerId| -> bool {
|
||||
let old_next_hop = self.get_next_hop(old_peer_id);
|
||||
let new_next_hop = item.value();
|
||||
old_next_hop.is_none() || new_next_hop.path_len < old_next_hop.unwrap().path_len
|
||||
};
|
||||
|
||||
if let Some(ipv4_addr) = info.ipv4_addr {
|
||||
self.ipv4_peer_id_map.insert(ipv4_addr.into(), *peer_id);
|
||||
self.ipv4_peer_id_map
|
||||
.entry(ipv4_addr.into())
|
||||
.and_modify(|v| {
|
||||
if *v != *peer_id && is_new_peer_better(*v) {
|
||||
*v = *peer_id;
|
||||
}
|
||||
})
|
||||
.or_insert(*peer_id);
|
||||
}
|
||||
|
||||
for cidr in info.proxy_cidrs.iter() {
|
||||
self.cidr_peer_id_map
|
||||
.insert(cidr.parse().unwrap(), *peer_id);
|
||||
.entry(cidr.parse().unwrap())
|
||||
.and_modify(|v| {
|
||||
if *v != *peer_id && is_new_peer_better(*v) {
|
||||
// if the next hop is not set or the new next hop is better, update it.
|
||||
*v = *peer_id;
|
||||
}
|
||||
})
|
||||
.or_insert(*peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1022,12 +1080,14 @@ struct PeerRouteServiceImpl {
|
||||
|
||||
interface: Mutex<Option<RouteInterfaceBox>>,
|
||||
|
||||
cost_calculator: std::sync::Mutex<Option<RouteCostCalculator>>,
|
||||
cost_calculator: std::sync::RwLock<Option<RouteCostCalculator>>,
|
||||
route_table: RouteTable,
|
||||
route_table_with_cost: RouteTable,
|
||||
foreign_network_owner_map: DashMap<NetworkIdentity, Vec<PeerId>>,
|
||||
foreign_network_my_peer_id_map: DashMap<(String, PeerId), PeerId>,
|
||||
synced_route_info: SyncedRouteInfo,
|
||||
cached_local_conn_map: std::sync::Mutex<RouteConnBitmap>,
|
||||
cached_local_conn_map_version: AtomicVersion,
|
||||
|
||||
last_update_my_foreign_network: AtomicCell<Option<std::time::Instant>>,
|
||||
|
||||
@@ -1045,6 +1105,10 @@ impl Debug for PeerRouteServiceImpl {
|
||||
.field("route_table_with_cost", &self.route_table_with_cost)
|
||||
.field("synced_route_info", &self.synced_route_info)
|
||||
.field("foreign_network_owner_map", &self.foreign_network_owner_map)
|
||||
.field(
|
||||
"foreign_network_my_peer_id_map",
|
||||
&self.foreign_network_my_peer_id_map,
|
||||
)
|
||||
.field(
|
||||
"cached_local_conn_map",
|
||||
&self.cached_local_conn_map.lock().unwrap(),
|
||||
@@ -1063,19 +1127,22 @@ impl PeerRouteServiceImpl {
|
||||
|
||||
interface: Mutex::new(None),
|
||||
|
||||
cost_calculator: std::sync::Mutex::new(Some(Box::new(DefaultRouteCostCalculator))),
|
||||
cost_calculator: std::sync::RwLock::new(Some(Box::new(DefaultRouteCostCalculator))),
|
||||
|
||||
route_table: RouteTable::new(),
|
||||
route_table_with_cost: RouteTable::new(),
|
||||
foreign_network_owner_map: DashMap::new(),
|
||||
foreign_network_my_peer_id_map: DashMap::new(),
|
||||
|
||||
synced_route_info: SyncedRouteInfo {
|
||||
peer_infos: DashMap::new(),
|
||||
raw_peer_infos: DashMap::new(),
|
||||
conn_map: DashMap::new(),
|
||||
foreign_network: DashMap::new(),
|
||||
version: AtomicVersion::new(),
|
||||
},
|
||||
cached_local_conn_map: std::sync::Mutex::new(RouteConnBitmap::new()),
|
||||
cached_local_conn_map_version: AtomicVersion::new(),
|
||||
|
||||
last_update_my_foreign_network: AtomicCell::new(None),
|
||||
|
||||
@@ -1171,26 +1238,41 @@ impl PeerRouteServiceImpl {
|
||||
}
|
||||
|
||||
fn update_route_table(&self) {
|
||||
let mut calc_locked = self.cost_calculator.lock().unwrap();
|
||||
self.cost_calculator
|
||||
.write()
|
||||
.unwrap()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.begin_update();
|
||||
|
||||
let calc_locked = self.cost_calculator.read().unwrap();
|
||||
|
||||
calc_locked.as_mut().unwrap().begin_update();
|
||||
self.route_table.build_from_synced_info(
|
||||
self.my_peer_id,
|
||||
&self.synced_route_info,
|
||||
NextHopPolicy::LeastHop,
|
||||
calc_locked.as_mut().unwrap(),
|
||||
calc_locked.as_ref().unwrap(),
|
||||
);
|
||||
|
||||
self.route_table_with_cost.build_from_synced_info(
|
||||
self.my_peer_id,
|
||||
&self.synced_route_info,
|
||||
NextHopPolicy::LeastCost,
|
||||
calc_locked.as_mut().unwrap(),
|
||||
calc_locked.as_ref().unwrap(),
|
||||
);
|
||||
calc_locked.as_mut().unwrap().end_update();
|
||||
|
||||
drop(calc_locked);
|
||||
|
||||
self.cost_calculator
|
||||
.write()
|
||||
.unwrap()
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.end_update();
|
||||
}
|
||||
|
||||
fn update_foreign_network_owner_map(&self) {
|
||||
self.foreign_network_my_peer_id_map.clear();
|
||||
self.foreign_network_owner_map.clear();
|
||||
for item in self.synced_route_info.foreign_network.iter() {
|
||||
let key = item.key();
|
||||
@@ -1215,13 +1297,18 @@ impl PeerRouteServiceImpl {
|
||||
self.foreign_network_owner_map
|
||||
.entry(network_identity)
|
||||
.or_insert_with(|| Vec::new())
|
||||
.push(key.peer_id);
|
||||
.push(entry.my_peer_id_for_this_network);
|
||||
|
||||
self.foreign_network_my_peer_id_map.insert(
|
||||
(key.network_name.clone(), entry.my_peer_id_for_this_network),
|
||||
key.peer_id,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
fn cost_calculator_need_update(&self) -> bool {
|
||||
self.cost_calculator
|
||||
.lock()
|
||||
.read()
|
||||
.unwrap()
|
||||
.as_ref()
|
||||
.map(|x| x.need_update())
|
||||
@@ -1234,6 +1321,8 @@ impl PeerRouteServiceImpl {
|
||||
// update route table first because we want to filter out unreachable peers.
|
||||
self.update_route_table();
|
||||
|
||||
let synced_version = self.synced_route_info.version.get();
|
||||
|
||||
// the conn_bitmap should contain complete list of directly connected peers.
|
||||
// use union of dst peers can preserve this property.
|
||||
let all_dst_peer_ids = self
|
||||
@@ -1259,7 +1348,9 @@ impl PeerRouteServiceImpl {
|
||||
|
||||
let all_peer_ids = &conn_bitmap.peer_ids;
|
||||
for (peer_idx, (peer_id, _)) in all_peer_ids.iter().enumerate() {
|
||||
let connected = self.synced_route_info.conn_map.get(peer_id).unwrap();
|
||||
let Some(connected) = self.synced_route_info.conn_map.get(peer_id) else {
|
||||
continue;
|
||||
};
|
||||
|
||||
for (idx, (other_peer_id, _)) in all_peer_ids.iter().enumerate() {
|
||||
if connected.0.contains(other_peer_id) {
|
||||
@@ -1269,7 +1360,13 @@ impl PeerRouteServiceImpl {
|
||||
}
|
||||
}
|
||||
|
||||
*self.cached_local_conn_map.lock().unwrap() = conn_bitmap;
|
||||
let mut locked = self.cached_local_conn_map.lock().unwrap();
|
||||
if self
|
||||
.cached_local_conn_map_version
|
||||
.set_if_larger(synced_version)
|
||||
{
|
||||
*locked = conn_bitmap;
|
||||
}
|
||||
}
|
||||
|
||||
fn build_route_info(&self, session: &SyncRouteSession) -> Option<Vec<RoutePeerInfo>> {
|
||||
@@ -1303,7 +1400,7 @@ impl PeerRouteServiceImpl {
|
||||
.dst_saved_conn_bitmap_version
|
||||
.get(&peer_id)
|
||||
.map(|item| item.get());
|
||||
if Some(*local_version) != peer_version {
|
||||
if peer_version.is_none() || peer_version.unwrap() < *local_version {
|
||||
need_update = true;
|
||||
break;
|
||||
}
|
||||
@@ -1411,6 +1508,9 @@ impl PeerRouteServiceImpl {
|
||||
for p in to_remove.iter() {
|
||||
self.synced_route_info.foreign_network.remove(p);
|
||||
}
|
||||
|
||||
self.route_table.clean_expired_route_info();
|
||||
self.route_table_with_cost.clean_expired_route_info();
|
||||
}
|
||||
|
||||
fn build_sync_route_raw_req(
|
||||
@@ -1441,8 +1541,6 @@ impl PeerRouteServiceImpl {
|
||||
req_dynamic_msg.set_field_by_name("peer_infos", Value::Message(peer_infos));
|
||||
}
|
||||
|
||||
tracing::trace!(?req_dynamic_msg, "build_sync_route_raw_req");
|
||||
|
||||
req_dynamic_msg
|
||||
}
|
||||
|
||||
@@ -1558,7 +1656,12 @@ impl PeerRouteServiceImpl {
|
||||
}
|
||||
|
||||
fn update_peer_info_last_update(&self) {
|
||||
tracing::debug!(?self, "update_peer_info_last_update");
|
||||
tracing::debug!(
|
||||
"update_peer_info_last_update, my_peer_id: {:?}, prev: {:?}, new: {:?}",
|
||||
self.my_peer_id,
|
||||
self.peer_info_last_update.load(),
|
||||
std::time::Instant::now()
|
||||
);
|
||||
self.peer_info_last_update.store(std::time::Instant::now());
|
||||
}
|
||||
|
||||
@@ -2001,7 +2104,6 @@ impl PeerRoute {
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(session_mgr))]
|
||||
async fn maintain_session_tasks(
|
||||
session_mgr: RouteSessionManager,
|
||||
service_impl: Arc<PeerRouteServiceImpl>,
|
||||
@@ -2009,7 +2111,6 @@ impl PeerRoute {
|
||||
session_mgr.maintain_sessions(service_impl).await;
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(session_mgr))]
|
||||
async fn update_my_peer_info_routine(
|
||||
service_impl: Arc<PeerRouteServiceImpl>,
|
||||
session_mgr: RouteSessionManager,
|
||||
@@ -2022,6 +2123,7 @@ impl PeerRoute {
|
||||
|
||||
if service_impl.cost_calculator_need_update() {
|
||||
tracing::debug!("cost_calculator_need_update");
|
||||
service_impl.synced_route_info.version.inc();
|
||||
service_impl.update_route_table();
|
||||
}
|
||||
|
||||
@@ -2136,7 +2238,7 @@ impl Route for PeerRoute {
|
||||
let next_hop_peer_latency_first = route_table_with_cost.get_next_hop(*item.key());
|
||||
let mut route: crate::proto::cli::Route = item.value().clone().into();
|
||||
route.next_hop_peer_id = next_hop_peer.next_hop_peer_id;
|
||||
route.cost = (next_hop_peer.path_len - 1) as i32;
|
||||
route.cost = next_hop_peer.path_len as i32;
|
||||
route.path_latency = next_hop_peer.path_latency;
|
||||
|
||||
route.next_hop_peer_id_latency_first =
|
||||
@@ -2166,7 +2268,8 @@ impl Route for PeerRoute {
|
||||
}
|
||||
|
||||
async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {
|
||||
*self.service_impl.cost_calculator.lock().unwrap() = Some(_cost_fn);
|
||||
*self.service_impl.cost_calculator.write().unwrap() = Some(_cost_fn);
|
||||
self.service_impl.synced_route_info.version.inc();
|
||||
self.service_impl.update_route_table();
|
||||
}
|
||||
|
||||
@@ -2206,12 +2309,23 @@ impl Route for PeerRoute {
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
async fn get_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag> {
|
||||
async fn get_origin_my_peer_id(
|
||||
&self,
|
||||
network_name: &str,
|
||||
foreign_my_peer_id: PeerId,
|
||||
) -> Option<PeerId> {
|
||||
self.service_impl
|
||||
.foreign_network_my_peer_id_map
|
||||
.get(&(network_name.to_string(), foreign_my_peer_id))
|
||||
.map(|x| *x)
|
||||
}
|
||||
|
||||
async fn get_peer_info(&self, peer_id: PeerId) -> Option<RoutePeerInfo> {
|
||||
self.service_impl
|
||||
.route_table
|
||||
.peer_infos
|
||||
.get(&peer_id)
|
||||
.and_then(|x| x.feature_flag.clone())
|
||||
.map(|x| x.clone())
|
||||
}
|
||||
|
||||
async fn get_peer_info_last_update_time(&self) -> Instant {
|
||||
@@ -2307,7 +2421,10 @@ mod tests {
|
||||
|
||||
for r in vec![r_a.clone(), r_b.clone()].iter() {
|
||||
wait_for_condition(
|
||||
|| async { r.list_routes().await.len() == 1 },
|
||||
|| async {
|
||||
println!("route: {:?}", r.list_routes().await);
|
||||
r.list_routes().await.len() == 1
|
||||
},
|
||||
Duration::from_secs(5),
|
||||
)
|
||||
.await;
|
||||
@@ -2348,6 +2465,8 @@ mod tests {
|
||||
assert_eq!(i_a.0, i_b.1);
|
||||
assert_eq!(i_b.0, i_a.1);
|
||||
|
||||
println!("after drop p_b, r_b");
|
||||
|
||||
drop(r_b);
|
||||
drop(p_b);
|
||||
|
||||
|
||||
@@ -4,11 +4,9 @@ use dashmap::DashMap;
|
||||
|
||||
use crate::{
|
||||
common::{global_ctx::NetworkIdentity, PeerId},
|
||||
proto::{
|
||||
common::PeerFeatureFlag,
|
||||
peer_rpc::{
|
||||
ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, RouteForeignNetworkInfos,
|
||||
},
|
||||
proto::peer_rpc::{
|
||||
ForeignNetworkRouteInfoEntry, ForeignNetworkRouteInfoKey, RouteForeignNetworkInfos,
|
||||
RoutePeerInfo,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -95,9 +93,19 @@ pub trait Route {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
// my peer id in foreign network is different from the one in local network
|
||||
// this function is used to get the peer id in local network
|
||||
async fn get_origin_my_peer_id(
|
||||
&self,
|
||||
_network_name: &str,
|
||||
_foreign_my_peer_id: PeerId,
|
||||
) -> Option<PeerId> {
|
||||
None
|
||||
}
|
||||
|
||||
async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {}
|
||||
|
||||
async fn get_feature_flag(&self, peer_id: PeerId) -> Option<PeerFeatureFlag>;
|
||||
async fn get_peer_info(&self, peer_id: PeerId) -> Option<RoutePeerInfo>;
|
||||
|
||||
async fn get_peer_info_last_update_time(&self) -> std::time::Instant;
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ pub async fn connect_peer_manager(client: Arc<PeerManager>, server: Arc<PeerMana
|
||||
let (a_ring, b_ring) = create_ring_tunnel_pair();
|
||||
let a_mgr_copy = client.clone();
|
||||
tokio::spawn(async move {
|
||||
a_mgr_copy.add_client_tunnel(a_ring).await.unwrap();
|
||||
a_mgr_copy.add_client_tunnel(a_ring, false).await.unwrap();
|
||||
});
|
||||
let b_mgr_copy = server.clone();
|
||||
tokio::spawn(async move {
|
||||
|
||||
@@ -30,6 +30,7 @@ message PeerConnInfo {
|
||||
float loss_rate = 7;
|
||||
bool is_client = 8;
|
||||
string network_name = 9;
|
||||
bool is_closed = 10;
|
||||
}
|
||||
|
||||
message PeerInfo {
|
||||
@@ -102,6 +103,7 @@ message ListForeignNetworkRequest {}
|
||||
message ForeignNetworkEntryPb {
|
||||
repeated PeerInfo peers = 1;
|
||||
bytes network_secret_digest = 2;
|
||||
uint32 my_peer_id_for_this_network = 3;
|
||||
}
|
||||
|
||||
message ListForeignNetworkResponse {
|
||||
@@ -185,6 +187,7 @@ service VpnPortalRpc {
|
||||
enum TcpProxyEntryTransportType {
|
||||
TCP = 0;
|
||||
KCP = 1;
|
||||
QUIC = 2;
|
||||
}
|
||||
|
||||
enum TcpProxyEntryState {
|
||||
|
||||
@@ -33,6 +33,16 @@ message FlagsInConfig {
|
||||
|
||||
// enable magic dns or not
|
||||
bool accept_dns = 22;
|
||||
// enable private mode
|
||||
bool private_mode = 23;
|
||||
|
||||
// should we convert all tcp streams into quic streams
|
||||
bool enable_quic_proxy = 24;
|
||||
// does this peer allow quic input
|
||||
bool disable_quic_input = 25;
|
||||
|
||||
// a global relay limit, only work for foreign network
|
||||
uint64 foreign_relay_bps_limit = 26;
|
||||
}
|
||||
|
||||
message RpcDescriptor {
|
||||
@@ -169,3 +179,13 @@ message PortForwardConfigPb {
|
||||
SocketAddr dst_addr = 2;
|
||||
SocketType socket_type = 3;
|
||||
}
|
||||
|
||||
message ProxyDstInfo {
|
||||
SocketAddr dst_addr = 1;
|
||||
}
|
||||
|
||||
message LimiterConfig {
|
||||
optional uint64 burst_rate = 1; // default 1 means no burst (capacity is same with bps)
|
||||
optional uint64 bps = 2; // default 0 means no limit (unit is B/s)
|
||||
optional uint64 fill_duration_ms = 3; // default 10ms, the period to fill the bucket
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ message RoutePeerInfo {
|
||||
uint64 peer_route_id = 12;
|
||||
|
||||
uint32 network_length = 13;
|
||||
|
||||
optional uint32 quic_port = 14;
|
||||
}
|
||||
|
||||
message PeerIdVersion {
|
||||
@@ -46,6 +48,7 @@ message ForeignNetworkRouteInfoEntry {
|
||||
google.protobuf.Timestamp last_update = 2;
|
||||
uint32 version = 3;
|
||||
bytes network_secret_digest = 4;
|
||||
uint32 my_peer_id_for_this_network = 5;
|
||||
}
|
||||
|
||||
message RouteForeignNetworkInfos {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user