Compare commits

...

165 Commits

Author SHA1 Message Date
Sijie.Sun
b43c078152 fix udp proxy not work when being exit node (#133) 2024-06-05 08:08:55 +08:00
Sijie.Sun
6e77e6b5e7 support start on reboot (#132)
* move launcher to easytier lib
* support auto start after reboot
2024-06-04 23:06:10 +08:00
Sijie.Sun
f9e6264f31 fix upx and udp conn counter (#131)
* fix upx in workflow
* fix udp conn counter
2024-06-04 18:50:30 +08:00
Sijie.Sun
df17a7bb68 bugfix before release 11x (#130)
* use correct i18n hook

* fix peer rpc panic

make sure server use correct transact id

* fix dhcp

recreate tun device after ip changed

* use upx correctly

* compile arm & armv7

* prepare to release v1.1.0
2024-06-03 23:07:44 +08:00
Sijie.Sun
c1b725e64e websocket support bind addr (#129) 2024-06-02 21:48:16 +08:00
Sijie.Sun
360691276c support win7 and reduce win mem usage (#128) 2024-06-02 14:07:21 +08:00
Sijie.Sun
f1e9864d08 make release bin smaller (#127) 2024-06-02 09:15:40 +08:00
Sijie.Sun
abf9d23d52 improve hole punching and stun test (#124)
* implement new stun test algorithm, do test faster and provide more info
* support punching for symmetric
2024-06-02 07:20:57 +08:00
Sijie.Sun
bdbb1f02d6 Update Cargo.lock (#122) 2024-05-22 00:36:16 +08:00
Sijie.Sun
f64f58e2ae support exit node (#121)
support exit node, proxy all traffic via one of the nodes
NOTE: this patch has not implemented automatic route management.
2024-05-18 20:32:42 +08:00
Sijie.Sun
6efbb5cb3d minor fixed (#120)
1. fix mtu, always set by ourselves and use smaller value
2. wireguard connector should return tunnel after receive packet
2024-05-18 18:04:06 +08:00
m1m1sha
0ead308392 Feat/pseudo dhcp (#109)
*  feat: pseudo dhcp
2024-05-17 23:16:56 +08:00
Sijie.Sun
bad6a5946a fix run with config, update readme (#118) 2024-05-17 22:00:11 +08:00
Sijie.Sun
7532a7c1b2 command line improvement (#115)
make -l easy to use:
-l wg wss
-l wg:12345
-l 12345

make -r use random port
2024-05-16 20:16:09 +08:00
Sijie.Sun
f665de9b18 allow peer rpc split packet, so tunnel mtu can be small (#116) 2024-05-16 08:18:35 +08:00
m1m1sha
58d2ec475c 🐞 fix: cmd error with gbk (#114)
* 🐞 fix: cmd error with gbk
* 🎈 perf: try gbk only on windows
2024-05-15 20:50:11 +08:00
Sijie.Sun
d5bf041834 some minor fix (#113)
1. fix ospf route panic if no ipv4 assigned.
2. should refetch global peer latency map every 60s
3. remove regex dep because it's too large and unnecessary.
2024-05-15 09:21:20 +08:00
Sijie.Sun
4e9b07f83b Merge pull request #108 from EasyTier/latency_first 2024-05-13 22:30:50 +08:00
sijie.sun
fc4e3782bd tune command line args 2024-05-13 22:13:31 +08:00
sijie.sun
3e6b1ac384 use path with least cost if hop count is same 2024-05-13 21:18:52 +08:00
sijie.sun
29365c39ed use latency from peer center for route 2024-05-13 21:18:34 +08:00
sijie.sun
09ebed157e fix peer center for latency report 2024-05-13 20:30:25 +08:00
sijie.sun
72f86025bd support custom cost calculate func when generating route table 2024-05-13 20:30:25 +08:00
sijie.sun
51aa23b635 add ttl for packet 2024-05-13 20:30:25 +08:00
m1m1sha
43e076ef18 🐞 fix: same tun name 2024-05-13 16:11:37 +08:00
sijie.sun
29d8d4ba87 correctly handle listener add fail 2024-05-11 23:29:55 +08:00
sijie.sun
1b1d76de99 introduce websocket tunnel 2024-05-11 23:29:55 +08:00
m1m1sha
a5637003ad Perf/optimize details (#106)
* 🎈 perf: details
* 🎈 perf: optimize Style
2024-05-11 16:26:44 +08:00
Sijie.Sun
65ac991d1c (Tyr) fix flashing console window on windows (#105)
add requireAdmin in app manifest
2024-05-11 12:02:15 +08:00
sijie.sun
0926820849 fix workflow status check for matrix build 2024-05-11 00:39:35 +08:00
sijie.sun
518b6e277a networkList should not be empty after first start 2024-05-11 00:39:35 +08:00
Sijie.Sun
2deb867678 move shared codes in workflows to script (#103) 2024-05-10 22:56:00 +08:00
Sijie.Sun
e023c05440 Merge pull request #102 from EasyTier/fix-gui-workflow
rename workflow job name for gui
2024-05-10 22:39:23 +08:00
m1m1sha
486286e497 🐎 ci: change trigger
change the triggering mechanism to skip jobs upon detecting changes
2024-05-10 22:25:37 +08:00
Sijie.Sun
72701c9eb3 start tcp proxy after tun device created (#94)
on win 10, tcp proxy listener created before tun device may not accept
conn from tun dev.
2024-05-10 21:40:50 +08:00
Sijie.Sun
b1153378c9 fix icmp proxy on MacOS (#101)
1. MacOS doesn't fill length field in ip header when recving from raw
socket
2. Fix udp & icmp subnet proxy not work when no p2p connection.
2024-05-10 21:40:29 +08:00
sijie.sun
ab0404bf6e rename job and artifact name for easytier-gui workflow 2024-05-10 20:40:49 +08:00
m1m1sha
2a728482fa 🐎 ci: modify action on paths and split the steps (#96)
* 🐎 ci: modify action on paths and split the steps
2024-05-10 17:44:16 +08:00
m1m1sha
bee9565225 Merge pull request #100 from m1m1sha/perf/ts-type
Perf/ts type
2024-05-10 15:25:29 +08:00
m1m1sha
e07f760def 🎈 perf: simplify format 2024-05-10 11:56:18 +08:00
m1m1sha
24e2f41260 Merge branch 'EasyTier:main' into perf/ts-type 2024-05-10 00:16:39 +08:00
Yumin Wu
4da7f4ec20 fix AllowIps and Address fields for WireGuard client (#99)
- add Wireguard client cidr into AllowIps
- change subnet number to 32 in Address field
2024-05-09 22:01:55 +08:00
Sijie.Sun
7d3b8e42fe move reconn task join into select! (#88)
if join_next stuck, may miss global event and cause panic
2024-05-09 18:51:58 +08:00
Sijie.Sun
68c077820f Merge pull request #97 from wuyumin/yumin-dev 2024-05-09 18:26:34 +08:00
Yumin Wu
b4ebe7a481 update .gitignore 2024-05-09 17:56:00 +08:00
Yumin Wu
b1f8c5c175 update release profile 2024-05-09 17:18:20 +08:00
Yumin Wu
469187d0bb temporary version(v1.0.0 is already published) 2024-05-09 15:20:49 +08:00
Yumin Wu
770ab4a01b command friendly tips 2024-05-09 15:06:32 +08:00
Yumin Wu
e4146c3f92 release reduce size 2024-05-09 15:04:27 +08:00
Yumin Wu
8e841bf5b5 fixed version 2024-05-09 15:02:28 +08:00
Sijie.Sun
076f6cd965 Merge pull request #93 from wuyumin/yumin-dev
update files for compiler
2024-05-08 22:29:45 +08:00
Yumin Wu
801104ca69 add target 2024-05-08 21:52:59 +08:00
Yumin Wu
5d5d8b122a rename config for IDE 2024-05-08 21:51:37 +08:00
Yumin Wu
4387d49a42 update Cargo.lock 2024-05-08 21:50:38 +08:00
Sijie.Sun
2d394acc47 Merge pull request #90 from m1m1sha/feat/custom-hostname
Feat/custom hostname
2024-05-08 21:44:01 +08:00
Sijie.Sun
e1e10b24e6 Merge pull request #92 from wuyumin/main 2024-05-08 21:07:56 +08:00
m1m1sha
52fef9fd4f 🎈 perf: 主机名提示显示本机主机名 2024-05-08 21:02:14 +08:00
m1m1sha
e6ad308cd5 ↩ revert: 兼容性 2024-05-08 20:49:33 +08:00
m1m1sha
bf6b46ec8e 🎈 perf: func 2024-05-08 19:09:39 +08:00
m1m1sha
da0777293f 🎈 perf: ts type 2024-05-08 18:58:17 +08:00
Yumin Wu
4ca840239a wireguard client keepalive 2024-05-08 17:40:43 +08:00
m1m1sha
30ccfab288 🐞 fix: hostname empty 2024-05-08 16:18:09 +08:00
m1m1sha
bde5b7f6ea 🎈 perf: get hostname 2024-05-08 16:06:11 +08:00
m1m1sha
6448955e05 🌈 style: 去除表格抖动 2024-05-08 14:48:23 +08:00
m1m1sha
0498b55d39 feat: custom hostname 2024-05-08 14:47:22 +08:00
Sijie.Sun
c3df9ea7fa Merge pull request #84 from m1m1sha/perf/gui-front-perf
Optimize the GUI front-end project structure
2024-05-08 00:25:20 +08:00
m1m1sha
6f437bf4c3 Merge branch 'perf/gui-front-perf' of https://github.com/m1m1sha/easytier into perf/gui-front-perf 2024-05-07 23:59:25 +08:00
m1m1sha
74f01e9800 🐳 chore: eslint config 2024-05-07 23:50:01 +08:00
m1m1sha
5cbe59219d 🐳 chore: 修改工作区配置
move the gui workspace configuration from the main workspace to the gui workspace to avoid issues such as plugin warnings
2024-05-07 23:49:17 +08:00
m1m1sha
1db1fbc03b 🐳 chore: vsc recommendations 2024-05-07 23:48:10 +08:00
m1m1sha
836a90e4d7 🌈 style: 清理依赖 2024-05-07 23:48:10 +08:00
m1m1sha
bc64b05e18 🐳 chore: vsc workspace 2024-05-07 23:48:00 +08:00
m1m1sha
1170f758c1 🌈 style: eslint lint 2024-05-07 23:47:42 +08:00
m1m1sha
0b3ff3ced3 🐳 chore: eslint 2024-05-07 23:47:21 +08:00
m1m1sha
060b11578f 🐳 chore: 增加依赖 2024-05-07 23:42:52 +08:00
m1m1sha
d4d352a36f 🐳 chore: pnpm lock 2024-05-07 23:39:38 +08:00
m1m1sha
c768e1d13b 🐞 fix: 全局作用域中异步加载语言 2024-05-07 23:39:38 +08:00
m1m1sha
5605d239ce 🐳 chore: eslint config and script
`eslint` 只忽略 `tauri` 文件目录
增加 `eslint` 自动修复命令
2024-05-07 23:39:38 +08:00
m1m1sha
831ede7d35 🌈 style: lint 2024-05-07 23:39:38 +08:00
m1m1sha
97e8cbb9ed 🐞 fix: 不可使用顶级 await 2024-05-07 23:39:38 +08:00
m1m1sha
705c34623c 🐳 chore: eslint 命令行忽略文件
由于未知原因导致 eslint 配置项中 ignores 未生效,暂时使用命令行代替
2024-05-07 23:39:38 +08:00
m1m1sha
42f933dfc3 🐞 fix: i18n 读写 key 不一致 2024-05-07 23:39:38 +08:00
m1m1sha
d2f89bb0ac 🐳 chore: eslint config 2024-05-07 23:39:38 +08:00
m1m1sha
114208081f 🐳 chore: 修改工作区配置
move the gui workspace configuration from the main workspace to the gui workspace to avoid issues such as plugin warnings
2024-05-07 23:39:38 +08:00
m1m1sha
bd484eb7fe 🐳 chore: vsc recommendations 2024-05-07 23:39:38 +08:00
m1m1sha
d44b63d45f 🌈 style: 清理依赖 2024-05-07 23:39:38 +08:00
m1m1sha
307a0c7b3c 🐳 chore: vsc workspace 2024-05-07 23:39:38 +08:00
m1m1sha
c66939249f 🌈 style: eslint lint 2024-05-07 23:39:38 +08:00
m1m1sha
6f75dd72b9 🐳 chore: eslint 2024-05-07 23:39:38 +08:00
m1m1sha
e6408f2582 🎈 perf: 修改多语言图标 2024-05-07 23:39:38 +08:00
m1m1sha
934cfce1b0 🐞 fix: 可能使用不存在的语言 2024-05-07 23:39:38 +08:00
m1m1sha
76292a8377 🎈 perf: 移除无用tsconfig 2024-05-07 23:39:38 +08:00
m1m1sha
20c509da77 🎈 perf: 拆分main 2024-05-07 23:39:38 +08:00
m1m1sha
584d924433 🎈 perf: 更新引入 2024-05-07 23:39:38 +08:00
m1m1sha
740d2938f5 🎈 perf: 使用路径路由 2024-05-07 23:39:38 +08:00
m1m1sha
7314309750 🎈 perf: 拆分composable 2024-05-07 23:39:38 +08:00
m1m1sha
af3e1634d1 🎈 perf: 拆分store 2024-05-07 23:39:38 +08:00
m1m1sha
376d533527 🎈 perf: 拆分i18n 2024-05-07 23:39:38 +08:00
m1m1sha
f583fea5e4 🎈 perf: 拆分type 2024-05-07 23:39:38 +08:00
m1m1sha
14a391d4fc 🐳 chore: 增加依赖 2024-05-07 23:39:38 +08:00
Sijie.Sun
14df3d3075 mips support wireguard (#87) 2024-05-07 23:14:29 +08:00
m1m1sha
0fa7895301 🐳 chore: pnpm lock update 2024-05-07 23:10:33 +08:00
m1m1sha
b9c4cd25a6 Merge branch 'perf/gui-front-perf' of https://github.com/m1m1sha/easytier into perf/gui-front-perf 2024-05-07 23:09:53 +08:00
m1m1sha
ecdf9f34ea 🐳 chore: pnpm lock 2024-05-07 23:04:06 +08:00
m1m1sha
5b14fc05d2 🐞 fix: 全局作用域中异步加载语言 2024-05-07 23:01:23 +08:00
m1m1sha
6089813da5 🐳 chore: eslint config and script
`eslint` 只忽略 `tauri` 文件目录
增加 `eslint` 自动修复命令
2024-05-07 23:01:06 +08:00
m1m1sha
189a073f05 🌈 style: lint 2024-05-07 23:01:06 +08:00
m1m1sha
a6b8f2023c 🐞 fix: 不可使用顶级 await 2024-05-07 23:01:06 +08:00
m1m1sha
9c390230f5 🐳 chore: eslint 命令行忽略文件
由于未知原因导致 eslint 配置项中 ignores 未生效,暂时使用命令行代替
2024-05-07 23:01:06 +08:00
m1m1sha
36436b597f 🐞 fix: i18n 读写 key 不一致 2024-05-07 23:01:06 +08:00
m1m1sha
f0c7b3a9bf 🐳 chore: eslint config 2024-05-07 23:01:06 +08:00
m1m1sha
cbbd8a2b8c 🐳 chore: 修改工作区配置
move the gui workspace configuration from the main workspace to the gui workspace to avoid issues such as plugin warnings
2024-05-07 22:58:42 +08:00
m1m1sha
3f44f48814 🐳 chore: vsc recommendations 2024-05-07 22:58:42 +08:00
m1m1sha
1a1549cdc7 🌈 style: 清理依赖 2024-05-07 22:58:42 +08:00
m1m1sha
eafff8439c 🐳 chore: vsc workspace 2024-05-07 22:57:56 +08:00
m1m1sha
c37fc13404 🌈 style: eslint lint 2024-05-07 22:57:56 +08:00
m1m1sha
8b94b3cab0 🐳 chore: eslint 2024-05-07 22:57:56 +08:00
m1m1sha
37f01f2898 🎈 perf: 修改多语言图标 2024-05-07 22:57:56 +08:00
m1m1sha
cd3387357b 🐞 fix: 可能使用不存在的语言 2024-05-07 22:57:56 +08:00
m1m1sha
59ccb38db2 🎈 perf: 移除无用tsconfig 2024-05-07 22:57:56 +08:00
m1m1sha
39fcbf91d5 🎈 perf: 拆分main 2024-05-07 22:57:56 +08:00
m1m1sha
e3c82dbbc8 🎈 perf: 更新引入 2024-05-07 22:57:56 +08:00
m1m1sha
be67330c24 🎈 perf: 使用路径路由 2024-05-07 22:57:56 +08:00
m1m1sha
795b8ec1d0 🎈 perf: 拆分composable 2024-05-07 22:57:56 +08:00
m1m1sha
856cd33f26 🎈 perf: 拆分store 2024-05-07 22:57:56 +08:00
m1m1sha
0b30bdf4a0 🎈 perf: 拆分i18n 2024-05-07 22:57:56 +08:00
m1m1sha
11a3f786cb 🎈 perf: 拆分type 2024-05-07 22:57:56 +08:00
m1m1sha
0b389afd22 🐳 chore: 增加依赖 2024-05-07 22:57:56 +08:00
m1m1sha
1280e1dde2 replace yarn with pnpm (#85)
* 🐳 chore: replace yarn with pnpm
2024-05-07 22:40:09 +08:00
m1m1sha
d10917d47d 🐞 fix: 全局作用域中异步加载语言 2024-05-07 15:24:51 +08:00
m1m1sha
fb2a6d9b17 🐳 chore: eslint config and script
`eslint` 只忽略 `tauri` 文件目录
增加 `eslint` 自动修复命令
2024-05-07 14:33:26 +08:00
m1m1sha
a8c4b1feac 🌈 style: lint 2024-05-07 14:30:40 +08:00
m1m1sha
c0dc9a493d 🐞 fix: 不可使用顶级 await 2024-05-07 13:43:08 +08:00
m1m1sha
83baf2fdc7 🐳 chore: eslint 命令行忽略文件
由于未知原因导致 eslint 配置项中 ignores 未生效,暂时使用命令行代替
2024-05-07 10:46:58 +08:00
m1m1sha
8188585edd 🐞 fix: i18n 读写 key 不一致 2024-05-07 10:40:12 +08:00
m1m1sha
e9a625ec5f 🐳 chore: eslint config 2024-05-07 10:39:06 +08:00
Sijie.Sun
8440eb842b fix bugs and improve user experience (#86)
* correctly set mtu, and allow set mtu manually

* communicate between enc and non-enc should not panic

* allow loading cfg from file

* allow change file log level dynamically
2024-05-07 00:38:05 +08:00
m1m1sha
2e57599f41 🐳 chore: 修改工作区配置
move the gui workspace configuration from the main workspace to the gui workspace to avoid issues such as plugin warnings
2024-05-06 14:26:32 +08:00
m1m1sha
3abdca31f2 🐳 chore: vsc recommendations 2024-05-06 13:06:42 +08:00
m1m1sha
8d1e99da05 🌈 style: 清理依赖 2024-05-06 12:59:03 +08:00
m1m1sha
f72033e7f6 🐳 chore: vsc workspace 2024-05-06 12:52:12 +08:00
m1m1sha
57dce76363 🌈 style: eslint lint 2024-05-06 11:08:51 +08:00
m1m1sha
6c00ed4276 🐳 chore: eslint 2024-05-06 10:49:53 +08:00
m1m1sha
9e5bdf74bc 🎈 perf: 修改多语言图标 2024-05-06 09:49:59 +08:00
m1m1sha
893fba4adf 🐞 fix: 可能使用不存在的语言 2024-05-06 09:05:40 +08:00
m1m1sha
e7092bfcf6 🎈 perf: 移除无用tsconfig 2024-05-05 23:14:52 +08:00
m1m1sha
26c59d3507 🎈 perf: 拆分main 2024-05-05 23:14:37 +08:00
m1m1sha
c6660986c4 🎈 perf: 更新引入 2024-05-05 23:14:22 +08:00
m1m1sha
26d1482131 🎈 perf: 使用路径路由 2024-05-05 23:13:39 +08:00
m1m1sha
9dd44038bc 🎈 perf: 拆分composable 2024-05-05 23:12:54 +08:00
m1m1sha
06a0957734 🎈 perf: 拆分store 2024-05-05 23:12:19 +08:00
m1m1sha
6428f23dce 🎈 perf: 拆分i18n 2024-05-05 23:12:02 +08:00
m1m1sha
8604724ff7 🎈 perf: 拆分type 2024-05-05 23:11:00 +08:00
m1m1sha
fda056528b 🐳 chore: 增加依赖 2024-05-05 23:10:42 +08:00
Sijie.Sun
e5b537267e bug fix and improve (#81)
1. fix manual connector does not retry if dns resolve failed.
2. allow not creating tun device if no virtual ipv4 is assigned.
2024-05-05 16:18:05 +08:00
m1m1sha
638013a93d 🎈 perf: hidden cmd windows (#79)
* 🎈 perf: hidden cmd window, use CREATE_NO_WINDOW flag when exec shell cmd.
2024-05-05 15:33:05 +08:00
m1m1sha
064a009cb4 🐞 fix: Unable to correctly locate protoc in PATH 2024-05-05 13:02:12 +08:00
m1m1sha
0af32526f7 🐞 fix: 修复nightly错误 (https://github.com/KKRainbow/EasyTier/issues/74) (#75)
* 🐞 fix: 修复 1.80 nightly 编译错误

TODO: need fork boringtun and publish to crates.io before publishing easytier 

issues: https://github.com/KKRainbow/EasyTier/issues/74
2024-05-05 11:46:10 +08:00
Sijie.Sun
714667fdce Update rust.yml (#76)
fix [PR from fork cannot use github secret directly](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions#using-secrets-in-a-workflow)
2024-05-05 11:06:39 +08:00
Sijie.Sun
3a5332e31d use mimalloc for mips/mipsel (#71) 2024-05-04 00:26:57 +08:00
Sijie.Sun
61d5e38cc9 Update README.md (#70) 2024-05-03 21:50:48 +08:00
Sijie.Sun
3763c959db Merge pull request #68 from KKRainbow/mips
Add mips / mipsel support

- mips do not support wireguard.
- mips use aes-gcm crates instead of ring crates for encryption
2024-05-03 17:28:10 +08:00
sijie.sun
873851e6d0 mips 2024-05-03 17:09:46 +08:00
Sijie.Sun
ebbed97ed5 bump version in Cargo.toml to v1.0.0 (#67) 2024-05-03 07:57:05 +08:00
sijie.sun
1be6db661e bump version in Cargo.toml to v1.0.0 2024-05-02 23:28:38 +08:00
104 changed files with 13857 additions and 5677 deletions

View File

@@ -1,7 +0,0 @@
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[target.aarch64-unknown-linux-musl]
linker = "aarch64-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "target-feature=+crt-static"]

77
.cargo/config.toml Normal file
View File

@@ -0,0 +1,77 @@
[target.x86_64-unknown-linux-musl]
linker = "rust-lld"
rustflags = ["-C", "linker-flavor=ld.lld"]
[target.aarch64-unknown-linux-gnu]
linker = "aarch64-linux-gnu-gcc"
[target.aarch64-unknown-linux-musl]
linker = "aarch64-linux-musl-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.'cfg(all(windows, target_env = "msvc"))']
rustflags = ["-C", "target-feature=+crt-static"]
[target.mipsel-unknown-linux-musl]
linker = "mipsel-linux-muslsf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/mipsel-linux-muslsf-cross/mipsel-linux-muslsf/lib",
"-L",
"./musl_gcc/mipsel-linux-muslsf-cross/lib/gcc/mipsel-linux-muslsf/11.2.1",
"-l",
"atomic",
"-l",
"ctz",
]
[target.mips-unknown-linux-musl]
linker = "mips-linux-muslsf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/mips-linux-muslsf-cross/mips-linux-muslsf/lib",
"-L",
"./musl_gcc/mips-linux-muslsf-cross/lib/gcc/mips-linux-muslsf/11.2.1",
"-l",
"atomic",
"-l",
"ctz",
]
[target.armv7-unknown-linux-musleabihf]
linker = "armv7l-linux-musleabihf-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.armv7-unknown-linux-musleabi]
linker = "armv7m-linux-musleabi-gcc"
rustflags = ["-C", "target-feature=+crt-static"]
[target.arm-unknown-linux-musleabihf]
linker = "arm-linux-musleabihf-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/arm-linux-musleabihf-cross/arm-linux-musleabihf/lib",
"-L",
"./musl_gcc/arm-linux-musleabihf-cross/lib/gcc/arm-linux-musleabihf/11.2.1",
"-l",
"atomic",
]
[target.arm-unknown-linux-musleabi]
linker = "arm-linux-musleabi-gcc"
rustflags = [
"-C",
"target-feature=+crt-static",
"-L",
"./musl_gcc/arm-linux-musleabi-cross/arm-linux-musleabi/lib",
"-L",
"./musl_gcc/arm-linux-musleabi-cross/lib/gcc/arm-linux-musleabi/11.2.1",
"-l",
"atomic",
]

163
.github/workflows/core.yml vendored Normal file
View File

@@ -0,0 +1,163 @@
name: EasyTier Core
on:
push:
branches: ["develop", "main"]
pull_request:
branches: ["develop", "main"]
env:
CARGO_TERM_COLOR: always
defaults:
run:
# necessary for windows
shell: bash
jobs:
pre_job:
# continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest
# Map a step output to a job output
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
# All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: 'never'
skip_after_successful_duplicate: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/core.yml"]'
build:
strategy:
fail-fast: false
matrix:
include:
- TARGET: aarch64-unknown-linux-musl
OS: ubuntu-latest
- TARGET: x86_64-unknown-linux-musl
OS: ubuntu-latest
- TARGET: mips-unknown-linux-musl
OS: ubuntu-latest
- TARGET: mipsel-unknown-linux-musl
OS: ubuntu-latest
- TARGET: x86_64-apple-darwin
OS: macos-latest
- TARGET: aarch64-apple-darwin
OS: macos-latest
- TARGET: x86_64-pc-windows-msvc
OS: windows-latest
- TARGET: armv7-unknown-linux-musleabihf # raspberry pi 2-3-4, not tested
OS: ubuntu-latest
- TARGET: armv7-unknown-linux-musleabi # raspberry pi 2-3-4, not tested
OS: ubuntu-latest
- TARGET: arm-unknown-linux-musleabihf # raspberry pi 0-1, not tested
OS: ubuntu-latest
- TARGET: arm-unknown-linux-musleabi # raspberry pi 0-1, not tested
OS: ubuntu-latest
runs-on: ${{ matrix.OS }}
env:
NAME: easytier
TARGET: ${{ matrix.TARGET }}
OS: ${{ matrix.OS }}
OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }}
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v4
with:
node-version: 21
- name: Cargo cache
uses: actions/cache@v4
with:
path: |
~/.cargo
./target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install rust target
run: bash ./.github/workflows/install_rust.sh
- name: Setup protoc
uses: arduino/setup-protoc@v2
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Build Core & Cli
run: |
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cargo +nightly build -r --verbose --target $TARGET -Z build-std=std,panic_abort --no-default-features --features mips
else
cargo build --release --verbose --target $TARGET
fi
- name: Install UPX
if: ${{ matrix.OS != 'macos-latest' }}
uses: crazy-max/ghaction-upx@v3
with:
version: latest
install-only: true
- name: Compress
run: |
mkdir -p ./artifacts/objects/
# windows is the only OS using a different convention for executable file name
if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe
cp easytier/third_party/Packet.dll ./artifacts/objects/
cp easytier/third_party/wintun.dll ./artifacts/objects/
fi
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
TAG=$GITHUB_REF_NAME
else
TAG=$GITHUB_SHA
fi
if [[ ! $OS =~ ^macos.*$ ]]; then
upx --lzma --best ./target/$TARGET/release/easytier-core"$SUFFIX"
upx --lzma --best ./target/$TARGET/release/easytier-cli"$SUFFIX"
fi
mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/
mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/
tar -cvf ./artifacts/$NAME-$TARGET-$TAG.tar -C ./artifacts/objects/ .
rm -rf ./artifacts/objects/
- name: Archive artifact
uses: actions/upload-artifact@v4
with:
name: easytier-${{ matrix.OS }}-${{ matrix.TARGET }}
path: |
./artifacts/*
- name: Upload OSS
if: ${{ env.OSS_BUCKET != '' }}
uses: Menci/upload-to-oss@main
with:
access-key-id: ${{ secrets.ALIYUN_OSS_ACCESS_ID }}
access-key-secret: ${{ secrets.ALIYUN_OSS_ACCESS_KEY }}
endpoint: ${{ secrets.ALIYUN_OSS_ENDPOINT }}
bucket: ${{ secrets.ALIYUN_OSS_BUCKET }}
local-path: ./artifacts/
remote-path: /easytier-releases/${{ github.sha }}/
no-delete-remote-files: true
retry: 5
core-result:
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest
needs:
- pre_job
- build
steps:
- name: Mark result as failed
if: needs.build.result != 'success'
run: exit 1

204
.github/workflows/gui.yml vendored Normal file
View File

@@ -0,0 +1,204 @@
name: EasyTier GUI
on:
push:
branches: ["develop", "main"]
pull_request:
branches: ["develop", "main"]
env:
CARGO_TERM_COLOR: always
defaults:
run:
# necessary for windows
shell: bash
jobs:
pre_job:
# continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest
# Map a step output to a job output
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
# All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: 'never'
skip_after_successful_duplicate: 'true'
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", "easytier-gui/**", ".github/workflows/gui.yml"]'
build-gui:
strategy:
fail-fast: false
matrix:
include:
- TARGET: aarch64-unknown-linux-musl
OS: ubuntu-latest
GUI_TARGET: aarch64-unknown-linux-gnu
- TARGET: x86_64-unknown-linux-musl
OS: ubuntu-latest
GUI_TARGET: x86_64-unknown-linux-gnu
- TARGET: x86_64-apple-darwin
OS: macos-latest
GUI_TARGET: x86_64-apple-darwin
- TARGET: aarch64-apple-darwin
OS: macos-latest
GUI_TARGET: aarch64-apple-darwin
- TARGET: x86_64-pc-windows-msvc
OS: windows-latest
GUI_TARGET: x86_64-pc-windows-msvc
runs-on: ${{ matrix.OS }}
env:
NAME: easytier
TARGET: ${{ matrix.TARGET }}
OS: ${{ matrix.OS }}
GUI_TARGET: ${{ matrix.GUI_TARGET }}
OSS_BUCKET: ${{ secrets.ALIYUN_OSS_BUCKET }}
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v4
with:
node-version: 21
- name: Install pnpm
uses: pnpm/action-setup@v3
with:
version: 9
run_install: false
- name: Get pnpm store directory
shell: bash
run: |
echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
- name: Setup pnpm cache
uses: actions/cache@v4
with:
path: ${{ env.STORE_PATH }}
key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-store-
- name: Install frontend dependencies
run: |
cd easytier-gui
pnpm install
- name: Cargo cache
uses: actions/cache@v4
with:
path: |
~/.cargo
./target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install rust target
run: bash ./.github/workflows/install_rust.sh
- name: Setup protoc
uses: arduino/setup-protoc@v2
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install GUI cross compile (aarch64 only)
if: ${{ matrix.TARGET == 'aarch64-unknown-linux-musl' }}
run: |
# see https://tauri.app/v1/guides/building/linux/
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
sudo dpkg --add-architecture arm64
sudo apt-get update && sudo apt-get upgrade -y
sudo apt install libwebkit2gtk-4.0-dev:arm64
sudo apt install libssl-dev:arm64
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
- name: Build GUI
if: ${{ matrix.GUI_TARGET != '' }}
uses: tauri-apps/tauri-action@v0
with:
projectPath: ./easytier-gui
# https://tauri.app/v1/guides/building/linux/#cross-compiling-tauri-applications-for-arm-based-devices
args: --verbose --target ${{ matrix.GUI_TARGET }} ${{ matrix.OS == 'ubuntu-latest' && contains(matrix.TARGET, 'aarch64') && '--bundles deb' || '' }}
- name: Compress
run: |
mkdir -p ./artifacts/objects/
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
TAG=$GITHUB_REF_NAME
else
TAG=$GITHUB_SHA
fi
# copy gui bundle, gui is built without specific target
if [[ $OS =~ ^windows.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/nsis/*.exe ./artifacts/objects/
elif [[ $OS =~ ^macos.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/dmg/*.dmg ./artifacts/objects/
elif [[ $OS =~ ^ubuntu.*$ && ! $TARGET =~ ^mips.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/deb/*.deb ./artifacts/objects/
if [[ $GUI_TARGET =~ ^x86_64.*$ ]]; then
# currently only x86 appimage is supported
mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
fi
fi
tar -cvf ./artifacts/$NAME-$TARGET-$TAG.tar -C ./artifacts/objects/ .
rm -rf ./artifacts/objects/
- name: Archive artifact
uses: actions/upload-artifact@v4
with:
name: easytier-gui-${{ matrix.OS }}-${{ matrix.TARGET }}
path: |
./artifacts/*
- name: Upload OSS
if: ${{ env.OSS_BUCKET != '' }}
uses: Menci/upload-to-oss@main
with:
access-key-id: ${{ secrets.ALIYUN_OSS_ACCESS_ID }}
access-key-secret: ${{ secrets.ALIYUN_OSS_ACCESS_KEY }}
endpoint: ${{ secrets.ALIYUN_OSS_ENDPOINT }}
bucket: ${{ secrets.ALIYUN_OSS_BUCKET }}
local-path: ./artifacts/
remote-path: /easytier-releases/${{ github.sha }}/gui
no-delete-remote-files: true
retry: 5
gui-result:
if: needs.pre_job.outputs.should_skip != 'true' && always()
runs-on: ubuntu-latest
needs:
- pre_job
- build-gui
steps:
- name: Mark result as failed
if: needs.build-gui.result != 'success'
run: exit 1

81
.github/workflows/install_rust.sh vendored Normal file
View File

@@ -0,0 +1,81 @@
#!/usr/bin/env bash
# env needed:
# - TARGET
# - GUI_TARGET
# - OS
# dependencies are only needed on ubuntu as that's the only place where
# we make cross-compilation
if [[ $OS =~ ^ubuntu.*$ ]]; then
sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools
# for easytier-gui
if [[ $GUI_TARGET != '' ]]; then
sudo apt install libwebkit2gtk-4.0-dev \
build-essential \
curl \
wget \
file \
libssl-dev \
libgtk-3-dev \
libayatana-appindicator3-dev \
librsvg2-dev \
patchelf
fi
# curl -s musl.cc | grep mipsel
case $TARGET in
mipsel-unknown-linux-musl)
MUSL_URI=mipsel-linux-muslsf
;;
mips-unknown-linux-musl)
MUSL_URI=mips-linux-muslsf
;;
aarch64-unknown-linux-musl)
MUSL_URI=aarch64-linux-musl
;;
armv7-unknown-linux-musleabihf)
MUSL_URI=armv7l-linux-musleabihf
;;
armv7-unknown-linux-musleabi)
MUSL_URI=armv7m-linux-musleabi
;;
arm-unknown-linux-musleabihf)
MUSL_URI=arm-linux-musleabihf
;;
arm-unknown-linux-musleabi)
MUSL_URI=arm-linux-musleabi
;;
esac
if [ -n "$MUSL_URI" ]; then
mkdir -p ./musl_gcc
wget -c https://musl.cc/${MUSL_URI}-cross.tgz -P ./musl_gcc/
tar zxf ./musl_gcc/${MUSL_URI}-cross.tgz -C ./musl_gcc/
sudo ln -s $(pwd)/musl_gcc/${MUSL_URI}-cross/bin/*gcc /usr/bin/
fi
fi
# see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable
rustup install 1.75
rustup default 1.75
# mips/mipsel cannot add target from rustup, need compile by ourselves
if [[ $OS =~ ^ubuntu.*$ && $TARGET =~ ^mips.*$ ]]; then
cd "$PWD/musl_gcc/${MUSL_URI}-cross/lib/gcc/${MUSL_URI}/11.2.1" || exit 255
# for panic-abort
cp libgcc_eh.a libunwind.a
# for mimalloc
ar x libgcc.a _ctzsi2.o _clz.o _bswapsi2.o
ar rcs libctz.a _ctzsi2.o _clz.o _bswapsi2.o
rustup toolchain install nightly-x86_64-unknown-linux-gnu
rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu
cd -
else
rustup target add $TARGET
if [[ $GUI_TARGET != '' ]]; then
rustup target add $GUI_TARGET
fi
fi

View File

@@ -1,236 +0,0 @@
name: Rust
on:
push:
branches: [ "develop", "main" ]
pull_request:
branches: [ "develop", "main" ]
env:
CARGO_TERM_COLOR: always
defaults:
run:
# necessary for windows
shell: bash
jobs:
build:
strategy:
fail-fast: false
matrix:
include:
- TARGET: aarch64-unknown-linux-musl
OS: ubuntu-latest
GUI_TARGET: aarch64-unknown-linux-gnu
- TARGET: x86_64-unknown-linux-musl
OS: ubuntu-latest
GUI_TARGET: x86_64-unknown-linux-gnu
- TARGET: x86_64-apple-darwin
OS: macos-latest
GUI_TARGET: x86_64-apple-darwin
- TARGET: aarch64-apple-darwin
OS: macos-latest
GUI_TARGET: aarch64-apple-darwin
- TARGET: x86_64-pc-windows-msvc
OS: windows-latest
GUI_TARGET: x86_64-pc-windows-msvc
runs-on: ${{ matrix.OS }}
env:
NAME: easytier
TARGET: ${{ matrix.TARGET }}
OS: ${{ matrix.OS }}
GUI_TARGET: ${{ matrix.GUI_TARGET }}
steps:
- uses: actions/checkout@v3
- name: Setup protoc
uses: arduino/setup-protoc@v2
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/setup-node@v4
with:
node-version: 21
cache: 'yarn'
cache-dependency-path: easytier-gui/yarn.lock
- name: Install Yarn
run: npm install -g yarn
- name: Cargo cache
uses: actions/cache@v4.0.0
with:
path: |
~/.cargo
./target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install rust target
run: |
# dependencies are only needed on ubuntu as that's the only place where
# we make cross-compilation
if [[ $OS =~ ^ubuntu.*$ ]]; then
sudo apt-get update && sudo apt-get install -qq crossbuild-essential-arm64 crossbuild-essential-armhf musl-tools
# for easytier-gui
sudo apt install libwebkit2gtk-4.0-dev \
build-essential \
curl \
wget \
file \
libssl-dev \
libgtk-3-dev \
libayatana-appindicator3-dev \
librsvg2-dev
# curl -s musl.cc | grep mipsel
case $TARGET in
mipsel-unknown-linux-musl)
MUSL_URI=mipsel-linux-musl-cross
;;
aarch64-unknown-linux-musl)
MUSL_URI=aarch64-linux-musl-cross
;;
armv7-unknown-linux-musleabihf)
MUSL_URI=armv7l-linux-musleabihf-cross
;;
arm-unknown-linux-musleabihf)
MUSL_URI=arm-linux-musleabihf-cross
;;
mips-unknown-linux-musl)
MUSL_URI=mips-linux-musl-cross
;;
esac
if [ -n "$MUSL_URI" ]; then
mkdir -p ./musl_gcc
wget -c https://musl.cc/$MUSL_URI.tgz -P ./musl_gcc/
tar zxf ./musl_gcc/$MUSL_URI.tgz -C ./musl_gcc/
sudo ln -s $(pwd)/musl_gcc/$MUSL_URI/bin/*gcc /usr/bin/
fi
fi
# see https://github.com/rust-lang/rustup/issues/3709
rustup set auto-self-update disable
rustup install 1.75
rustup default 1.75
rustup target add $TARGET
rustup target add $GUI_TARGET
- name: Run build
run: cargo build --release --verbose --target $TARGET
- name: Install for aarch64 gui cross compile
run: |
# see https://tauri.app/v1/guides/building/linux/
if [[ $TARGET == "aarch64-unknown-linux-musl" ]]; then
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy main restricted" | sudo tee /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=amd64] http://security.ubuntu.com/ubuntu/ jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-backports main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security main restricted" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security universe" | sudo tee -a /etc/apt/sources.list
echo "deb [arch=armhf,arm64] http://ports.ubuntu.com/ubuntu-ports jammy-security multiverse" | sudo tee -a /etc/apt/sources.list
sudo dpkg --add-architecture arm64
sudo apt-get update && sudo apt-get upgrade -y
sudo apt install libwebkit2gtk-4.0-dev:arm64
sudo apt install libssl-dev:arm64
echo "PKG_CONFIG_SYSROOT_DIR=/usr/aarch64-linux-gnu/" >> "$GITHUB_ENV"
fi
- name: Run build GUI
run: |
cd easytier-gui
yarn install
if [[ $OS =~ ^ubuntu.*$ && ! $GUI_TARGET =~ ^x86_64.*$ ]]; then
# only build deb for non-x86_64 linux
yarn tauri build -- --target $GUI_TARGET --verbose --bundles deb
else
yarn tauri build -- --target $GUI_TARGET --verbose
fi
- name: Compress
run: |
mkdir -p ./artifacts/objects/
# windows is the only OS using a different convention for executable file name
if [[ $OS =~ ^windows.*$ ]]; then
SUFFIX=.exe
cp easytier/third_party/Packet.dll ./artifacts/objects/
cp easytier/third_party/wintun.dll ./artifacts/objects/
fi
if [[ $GITHUB_REF_TYPE =~ ^tag$ ]]; then
TAG=$GITHUB_REF_NAME
else
TAG=$GITHUB_SHA
fi
mv ./target/$TARGET/release/easytier-core"$SUFFIX" ./artifacts/objects/
mv ./target/$TARGET/release/easytier-cli"$SUFFIX" ./artifacts/objects/
# copy gui bundle, gui is built without specific target
if [[ $OS =~ ^windows.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/nsis/*.exe ./artifacts/objects/
elif [[ $OS =~ ^macos.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/dmg/*.dmg ./artifacts/objects/
elif [[ $OS =~ ^ubuntu.*$ ]]; then
mv ./target/$GUI_TARGET/release/bundle/deb/*.deb ./artifacts/objects/
if [[ $GUI_TARGET =~ ^x86_64.*$ ]]; then
# currently only x86 appimage is supported
mv ./target/$GUI_TARGET/release/bundle/appimage/*.AppImage ./artifacts/objects/
fi
fi
tar -cvf ./artifacts/$NAME-$TARGET-$TAG.tar -C ./artifacts/objects/ .
rm -rf ./artifacts/objects/
- name: Archive artifact
uses: actions/upload-artifact@v4
with:
name: easytier-${{ matrix.OS }}-${{ matrix.TARGET }}
path: |
./artifacts/*
- name: Upload OSS
uses: Menci/upload-to-oss@main
with:
access-key-id: ${{ secrets.ALIYUN_OSS_ACCESS_ID }}
access-key-secret: ${{ secrets.ALIYUN_OSS_ACCESS_KEY }}
endpoint: ${{ secrets.ALIYUN_OSS_ENDPOINT }}
bucket: ${{ secrets.ALIYUN_OSS_BUCKET }}
local-path: ./artifacts/
remote-path: /easytier-releases/${{ github.sha }}/
no-delete-remote-files: true
retry: 5
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Setup protoc
uses: arduino/setup-protoc@v2
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup tools for test
run: sudo apt install bridge-utils
- name: Setup system for test
run: |
sudo sysctl net.bridge.bridge-nf-call-iptables=0
sudo sysctl net.bridge.bridge-nf-call-ip6tables=0
sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
sudo ip addr add 2001:db8::2/64 dev lo
- name: Cargo cache
uses: actions/cache@v4.0.0
with:
path: |
~/.cargo
./target
key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}
- name: Run tests
run: |
sudo -E env "PATH=$PATH" cargo test --verbose
sudo chown -R $USER:$USER ./target
sudo chown -R $USER:$USER ~/.cargo

67
.github/workflows/test.yml vendored Normal file
View File

@@ -0,0 +1,67 @@
# CI workflow: run the cargo test suite, skipping duplicate runs for
# unchanged rust sources. NOTE(review): rendered with leading whitespace
# stripped — restore YAML nesting from the repository copy before reuse.
name: EasyTier Test
on:
push:
branches: ["develop", "main"]
pull_request:
branches: ["develop", "main"]
env:
CARGO_TERM_COLOR: always
defaults:
run:
# necessary for windows
shell: bash
jobs:
# Gate job: decides whether the test job can be skipped because an
# equivalent run (same rust-relevant paths) already succeeded.
pre_job:
# continue-on-error: true # Uncomment once integration is finished
runs-on: ubuntu-latest
# Map a step output to a job output
outputs:
should_skip: ${{ steps.skip_check.outputs.should_skip }}
steps:
- id: skip_check
uses: fkirc/skip-duplicate-actions@v5
with:
# All of these options are optional, so you can remove them if you are happy with the defaults
concurrent_skipping: 'never'
skip_after_successful_duplicate: 'true'
# Only changes to these paths trigger a fresh test run.
paths: '["Cargo.toml", "Cargo.lock", "easytier/**", ".github/workflows/test.yml"]'
test:
runs-on: ubuntu-latest
needs: pre_job
if: needs.pre_job.outputs.should_skip != 'true'
steps:
- uses: actions/checkout@v3
- name: Setup protoc
uses: arduino/setup-protoc@v2
with:
# GitHub repo token to use to avoid rate limiter
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Setup tools for test
run: sudo apt install bridge-utils
# Tests use bridges and IPv6 on loopback: disable bridge netfilter,
# enable IPv6 on lo, and assign a test address.
- name: Setup system for test
run: |
sudo sysctl net.bridge.bridge-nf-call-iptables=0
sudo sysctl net.bridge.bridge-nf-call-ip6tables=0
sudo sysctl net.ipv6.conf.lo.disable_ipv6=0
sudo ip addr add 2001:db8::2/64 dev lo
- name: Cargo cache
uses: actions/cache@v4
with:
path: |
~/.cargo
./target
key: ${{ runner.os }}-cargo-test-${{ hashFiles('**/Cargo.lock') }}
# Tests run under sudo (they manage network devices); fix up ownership of
# cargo/target artifacts afterwards so the cache step can read them.
- name: Run tests
run: |
sudo -E env "PATH=$PATH" cargo test --no-default-features --features=full --verbose
sudo chown -R $USER:$USER ./target
sudo chown -R $USER:$USER ~/.cargo

5
.gitignore vendored
View File

@@ -24,3 +24,8 @@ nohup.out
.DS_Store
components.d.ts
musl_gcc
# log
easytier-panic.log

1658
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,13 @@
[workspace]
resolver = "2"
members = ["easytier", "easytier-gui/src-tauri"]
default-members = [ "easytier" ]
members = ["easytier", "easytier-gui/src-tauri"]
default-members = ["easytier"]
[profile.dev]
panic = "unwind"
[profile.release]
panic = "unwind"
panic = "abort"
lto = true
codegen-units = 1
strip = true

87
EasyTier.code-workspace Normal file
View File

@@ -0,0 +1,87 @@
{
"folders": [
{
"path": "."
},
{
"path": "easytier-gui"
},
{
"path": "easytier"
}
],
"settings": {
"eslint.experimental.useFlatConfig": true,
"prettier.enable": false,
"editor.formatOnSave": false,
"editor.codeActionsOnSave": {
"source.fixAll.eslint": "explicit",
"source.organizeImports": "never"
},
"eslint.rules.customizations": [
{
"rule": "style/*",
"severity": "off"
},
{
"rule": "style/eol-last",
"severity": "error"
},
{
"rule": "format/*",
"severity": "off"
},
{
"rule": "*-indent",
"severity": "off"
},
{
"rule": "*-spacing",
"severity": "off"
},
{
"rule": "*-spaces",
"severity": "off"
},
{
"rule": "*-order",
"severity": "off"
},
{
"rule": "*-dangle",
"severity": "off"
},
{
"rule": "*-newline",
"severity": "off"
},
{
"rule": "*quotes",
"severity": "off"
},
{
"rule": "*semi",
"severity": "off"
}
],
"eslint.validate": [
"code-workspace",
"javascript",
"javascriptreact",
"typescript",
"typescriptreact",
"vue",
"html",
"markdown",
"json",
"jsonc",
"yaml",
"toml",
"gql",
"graphql"
],
"i18n-ally.localesPaths": [
"easytier-gui/locales"
]
}
}

View File

@@ -3,10 +3,13 @@
[![GitHub](https://img.shields.io/github/license/KKRainbow/EasyTier)](https://github.com/KKRainbow/EasyTier/blob/main/LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/KKRainbow/EasyTier)](https://github.com/KKRainbow/EasyTier/commits/main)
[![GitHub issues](https://img.shields.io/github/issues/KKRainbow/EasyTier)](https://github.com/KKRainbow/EasyTier/issues)
[![GitHub actions](https://github.com/KKRainbow/EasyTier/actions/workflows/rust.yml/badge.svg)](https://github.com/KKRainbow/EasyTier/actions/)
[![GitHub Core Actions](https://github.com/KKRainbow/EasyTier/actions/workflows/core.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/core.yml)
[![GitHub GUI Actions](https://github.com/KKRainbow/EasyTier/actions/workflows/gui.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/gui.yml)
[简体中文](/README_CN.md) | [English](/README.md)
**Please visit the [EasyTier Official Website](https://www.easytier.top/en/) to view the full documentation.**
EasyTier is a simple, safe and decentralized VPN networking solution implemented with the Rust language and Tokio framework.
<p align="center">
@@ -27,7 +30,7 @@
- **TCP Support**: Provides reliable data transmission through concurrent TCP links when UDP is limited, optimizing performance.
- **High Availability**: Supports multi-path and switches to healthy paths when high packet loss or network errors are detected.
- **IPv6 Support**: Supports networking using IPv6.
- **Multiple Protocol Types**: Supports communication between nodes using protocols such as WebSocket and QUIC.
## Installation
@@ -218,19 +221,22 @@ After successfully starting easytier-core, use easytier-cli to obtain the WireGu
$> easytier-cli vpn-portal
portal_name: wireguard
client_config:
############### client_config_start ###############
[Interface]
PrivateKey = 9VDvlaIC9XHUvRuE06hD2CEDrtGF+0lDthgr9SZfIho=
Address = 10.14.14.0/24 # should assign an ip from this cidr manually
Address = 10.14.14.0/32 # should assign an ip from this cidr manually
[Peer]
PublicKey = zhrZQg4QdPZs8CajT3r4fmzcNsWpBL9ImQCUsnlXyGM=
AllowedIPs = 192.168.80.0/20,10.147.223.0/24,10.144.144.0/24
Endpoint = 0.0.0.0:11013 # should be the public ip of the vpn server
AllowedIPs = 10.144.144.0/24,10.14.14.0/24
Endpoint = 0.0.0.0:11013 # should be the public ip(or domain) of the vpn server
PersistentKeepalive = 25
############### client_config_end ###############
connected_clients:
[]
```
Before using the Client Config, you need to modify the Interface Address and Peer Endpoint to the client's IP and the IP of the EasyTier node, respectively. Import the configuration file into the WireGuard client to access the EasyTier network.
@@ -256,6 +262,7 @@ Before using the Client Config, you need to modify the Interface Address and Pee
- [ZeroTier](https://www.zerotier.com/): A global virtual network for connecting devices.
- [TailScale](https://tailscale.com/): A VPN solution aimed at simplifying network configuration.
- [vpncloud](https://github.com/dswd/vpncloud): A P2P Mesh VPN
- [Candy](https://github.com/lanthora/candy): A reliable, low-latency, and anti-censorship virtual private network
# License
@@ -265,4 +272,5 @@ Before using the Client Config, you need to modify the Interface Address and Pee
- Ask questions or report problems: [GitHub Issues](https://github.com/KKRainbow/EasyTier/issues)
- Discussion and exchange: [GitHub Discussions](https://github.com/KKRainbow/EasyTier/discussions)
- Telegram: https://t.me/easytier
- Telegram: https://t.me/easytier
- QQ Group: 949700262

View File

@@ -3,10 +3,13 @@
[![GitHub](https://img.shields.io/github/license/KKRainbow/EasyTier)](https://github.com/KKRainbow/EasyTier/blob/main/LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/KKRainbow/EasyTier)](https://github.com/KKRainbow/EasyTier/commits/main)
[![GitHub issues](https://img.shields.io/github/issues/KKRainbow/EasyTier)](https://github.com/KKRainbow/EasyTier/issues)
[![GitHub actions](https://github.com/KKRainbow/EasyTier/actions/workflows/rust.yml/badge.svg)](https://github.com/KKRainbow/EasyTier/actions/)
[![GitHub Core Actions](https://github.com/KKRainbow/EasyTier/actions/workflows/core.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/core.yml)
[![GitHub GUI Actions](https://github.com/KKRainbow/EasyTier/actions/workflows/gui.yml/badge.svg)](https://github.com/EasyTier/EasyTier/actions/workflows/gui.yml)
[简体中文](/README_CN.md) | [English](/README.md)
**请访问 [EasyTier 官网](https://www.easytier.top/) 以查看完整的文档。**
一个简单、安全、去中心化的内网穿透 VPN 组网方案,使用 Rust 语言和 Tokio 框架实现。
<p align="center">
@@ -27,6 +30,7 @@
- **TCP 支持**:在 UDP 受限的情况下,通过并发 TCP 链接提供可靠的数据传输,优化性能。
- **高可用性**:支持多路径和在检测到高丢包率或网络错误时切换到健康路径。
- **IPV6 支持**:支持利用 IPV6 组网。
- **多协议类型**: 支持使用 WebSocket、QUIC 等协议进行节点间通信。
## 安装
@@ -219,19 +223,22 @@ easytier-core 启动成功后,使用 easytier-cli 获取 WireGuard Client 的
$> easytier-cli vpn-portal
portal_name: wireguard
client_config:
############### client_config_start ###############
[Interface]
PrivateKey = 9VDvlaIC9XHUvRuE06hD2CEDrtGF+0lDthgr9SZfIho=
Address = 10.14.14.0/24 # should assign an ip from this cidr manually
Address = 10.14.14.0/32 # should assign an ip from this cidr manually
[Peer]
PublicKey = zhrZQg4QdPZs8CajT3r4fmzcNsWpBL9ImQCUsnlXyGM=
AllowedIPs = 192.168.80.0/20,10.147.223.0/24,10.144.144.0/24
Endpoint = 0.0.0.0:11013 # should be the public ip of the vpn server
AllowedIPs = 10.144.144.0/24,10.14.14.0/24
Endpoint = 0.0.0.0:11013 # should be the public ip(or domain) of the vpn server
PersistentKeepalive = 25
############### client_config_end ###############
connected_clients:
[]
```
使用 Client Config 前,需要将 Interface Address 和 Peer Endpoint 分别修改为客户端的 IP 和 EasyTier 节点的 IP。将配置文件导入 WireGuard 客户端,即可访问 EasyTier 网络。
@@ -259,6 +266,7 @@ connected_clients:
- [ZeroTier](https://www.zerotier.com/): 一个全球虚拟网络,用于连接设备。
- [TailScale](https://tailscale.com/): 一个旨在简化网络配置的 VPN 解决方案。
- [vpncloud](https://github.com/dswd/vpncloud): 一个 P2P Mesh VPN
- [Candy](https://github.com/lanthora/candy): 可靠、低延迟、抗审查的虚拟专用网络
# 许可证

View File

@@ -15,6 +15,7 @@ dist-ssr
# Editor directories and files
.vscode/*
!.vscode/extensions.json
!.vscode/settings.json
.idea
.DS_Store
*.suo

2
easytier-gui/.npmrc Normal file
View File

@@ -0,0 +1,2 @@
shamefully-hoist=true
strict-peer-dependencies=false

7
easytier-gui/.vscode/extensions.json vendored Normal file
View File

@@ -0,0 +1,7 @@
{
"recommendations": [
"dbaeumer.vscode-eslint",
"vue.volar",
"lokalise.i18n-ally"
]
}

5
easytier-gui/.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,5 @@
{
"i18n-ally.localesPaths": [
"locales"
]
}

View File

@@ -0,0 +1,12 @@
// @ts-check
// ESLint flat config for easytier-gui, built on the @antfu/eslint-config preset.
import antfu from '@antfu/eslint-config'
export default antfu({
// enable the preset's formatter integration (option of @antfu/eslint-config)
formatters: true,
rules: {
// require a trailing newline at end of every file
'style/eol-last': ['error', 'always'],
},
// the tauri rust sources are not linted by eslint
ignores: [
'src-tauri/**',
],
})

View File

@@ -0,0 +1,65 @@
network: 网络
networking_method: 网络方式
public_server: 公共服务器
manual: 手动
standalone: 独立
virtual_ipv4: 虚拟IPv4地址
virtual_ipv4_dhcp: DHCP
network_name: 网络名称
network_secret: 网络密码
public_server_url: 公共服务器地址
peer_urls: 对等节点地址
proxy_cidrs: 子网代理CIDR
enable_vpn_portal: 启用VPN门户
vpn_portal_listen_port: 监听端口
vpn_portal_client_network: 客户端子网
advanced_settings: 高级设置
basic_settings: 基础设置
listener_urls: 监听地址
rpc_port: RPC端口
config_network: 配置网络
running: 运行中
error_msg: 错误信息
detail: 详情
add_new_network: 添加新网络
del_cur_network: 删除当前网络
select_network: 选择网络
network_instances: 网络实例
instance_id: 实例ID
network_infos: 网络信息
parse_network_config: 解析网络配置
retain_network_instance: 保留网络实例
collect_network_infos: 收集网络信息
settings: 设置
exchange_language: Switch to English
disable_auto_launch: 关闭开机自启
enable_auto_launch: 开启开机自启
exit: 退出
chips_placeholder: '例如: {0}, 按回车添加'
hostname_placeholder: '留空默认为主机名: {0}'
off_text: 点击关闭
on_text: 点击开启
show_config: 显示配置
close: 关闭
my_node_info: 当前节点信息
peer_count: 已连接
upload: 上传
download: 下载
show_vpn_portal_config: 显示VPN门户配置
vpn_portal_config: VPN门户配置
show_event_log: 显示事件日志
event_log: 事件日志
peer_info: 节点信息
hostname: 主机名
route_cost: 路由
latency: 延迟
upload_bytes: 上传
download_bytes: 下载
loss_rate: 丢包率
run_network: 运行网络
stop_network: 停止网络
network_running: 运行中
network_stopped: 已停止
dhcp_experimental_warning: 实验性警告!使用DHCP时,如果组网环境中发生IP冲突,将自动更改IP。

View File

@@ -0,0 +1,65 @@
network: Network
networking_method: Networking Method
public_server: Public Server
manual: Manual
standalone: Standalone
virtual_ipv4: Virtual IPv4
virtual_ipv4_dhcp: DHCP
network_name: Network Name
network_secret: Network Secret
public_server_url: Public Server URL
peer_urls: Peer URLs
proxy_cidrs: Subnet Proxy CIDRs
enable_vpn_portal: Enable VPN Portal
vpn_portal_listen_port: VPN Portal Listen Port
vpn_portal_client_network: Client Sub Network
advanced_settings: Advanced Settings
basic_settings: Basic Settings
listener_urls: Listener URLs
rpc_port: RPC Port
config_network: Config Network
running: Running
error_msg: Error Message
detail: Detail
add_new_network: Add New Network
del_cur_network: Delete Current Network
select_network: Select Network
network_instances: Network Instances
instance_id: Instance ID
network_infos: Network Infos
parse_network_config: Parse Network Config
retain_network_instance: Retain Network Instance
collect_network_infos: Collect Network Infos
settings: Settings
exchange_language: 切换中文
disable_auto_launch: Disable Launch on Reboot
enable_auto_launch: Enable Launch on Reboot
exit: Exit
chips_placeholder: 'e.g: {0}, press Enter to add'
hostname_placeholder: 'Leave blank and default to host name: {0}'
off_text: Press to disable
on_text: Press to enable
show_config: Show Config
close: Close
my_node_info: My Node Info
peer_count: Connected
upload: Upload
download: Download
show_vpn_portal_config: Show VPN Portal Config
vpn_portal_config: VPN Portal Config
show_event_log: Show Event Log
event_log: Event Log
peer_info: Peer Info
route_cost: Route Cost
hostname: Hostname
latency: Latency
upload_bytes: Upload
download_bytes: Download
loss_rate: Loss Rate
run_network: Run Network
stop_network: Stop Network
network_running: running
network_stopped: stopped
dhcp_experimental_warning: Experimental warning! if there is an IP conflict in the network when using DHCP, the IP will be automatically changed.

View File

@@ -1,37 +1,50 @@
{
"name": "easytier-gui",
"private": true,
"version": "0.0.0",
"type": "module",
"version": "0.0.0",
"private": true,
"scripts": {
"dev": "vite",
"build": "vue-tsc --noEmit && vite build",
"preview": "vite preview",
"tauri": "tauri"
"tauri": "tauri",
"lint": "eslint . --ignore-pattern src-tauri",
"lint:fix": "eslint . --ignore-pattern src-tauri --fix"
},
"dependencies": {
"@tauri-apps/api": "^1",
"@tauri-apps/api": "^1.5.5",
"pinia": "^2.1.7",
"primeflex": "^3.3.1",
"primeicons": "^7.0.0",
"primevue": "^3.51.0",
"vue": "^3.3.4",
"vue-router": "^4.3.0"
"primevue": "^3.52.0",
"vue": "^3.4.27",
"vue-i18n": "^9.13.1",
"vue-router": "^4.3.2"
},
"devDependencies": {
"@tauri-apps/cli": "^1",
"@antfu/eslint-config": "^2.17.0",
"@intlify/unplugin-vue-i18n": "^4.0.0",
"@tauri-apps/cli": "^1.5.13",
"@types/node": "^20.12.11",
"@types/uuid": "^9.0.8",
"@vitejs/plugin-vue": "^5.0.4",
"@vue-macros/volar": "^0.19.0",
"autoprefixer": "^10.4.19",
"naive-ui": "^2.38.1",
"eslint": "^9.2.0",
"eslint-plugin-format": "^0.1.1",
"postcss": "^8.4.38",
"tailwindcss": "^3.4.3",
"typescript": "^5.0.2",
"unplugin-vue-components": "^0.26.0",
"typescript": "^5.4.5",
"unplugin-auto-import": "^0.17.6",
"unplugin-vue-components": "^0.27.0",
"unplugin-vue-macros": "^2.9.2",
"unplugin-vue-markdown": "^0.26.2",
"unplugin-vue-router": "^0.8.6",
"uuid": "^9.0.1",
"vfonts": "^0.0.3",
"vite": "^5.0.0",
"vue-i18n": "^9.12.0",
"vue-tsc": "^1.8.5"
"vite": "^5.2.11",
"vite-plugin-vue-devtools": "^7.1.3",
"vite-plugin-vue-layouts": "^0.11.0",
"vue-i18n": "^9.13.1",
"vue-tsc": "^2.0.17"
}
}

5924
easytier-gui/pnpm-lock.yaml generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
[package]
name = "easytier-gui"
version = "0.0.0"
description = "A Tauri App"
description = "EasyTier GUI"
authors = ["you"]
edition = "2021"
@@ -11,7 +11,11 @@ edition = "2021"
tauri-build = { version = "1", features = [] }
[dependencies]
tauri = { version = "1", features = [ "process-exit", "system-tray", "shell-open"] }
tauri = { version = "1", features = [
"process-exit",
"system-tray",
"shell-open",
] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
@@ -24,6 +28,10 @@ once_cell = "1.18.0"
dashmap = "5.5.3"
privilege = "0.3"
gethostname = "0.4.3"
auto-launch = "0.5.0"
dunce = "1.0.4"
[features]
# This feature is used for production builds or when a dev server is not specified, DO NOT REMOVE!!

View File

@@ -1,3 +1,34 @@
fn main() {
tauri_build::build()
if !cfg!(debug_assertions) && cfg!(target_os = "windows") {
let mut windows = tauri_build::WindowsAttributes::new();
windows = windows.app_manifest(
r#"
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="requireAdministrator" uiAccess="false" />
</requestedPrivileges>
</security>
</trustInfo>
</assembly>
"#,
);
tauri_build::try_build(tauri_build::Attributes::new().windows_attributes(windows))
.expect("failed to run build script");
} else {
tauri_build::build();
}
}

View File

@@ -1,22 +1,17 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
mod launcher;
use std::{collections::BTreeMap, env::current_exe, process};
use anyhow::Context;
use chrono::{DateTime, Local};
use auto_launch::AutoLaunchBuilder;
use dashmap::DashMap;
use easytier::{
common::{
config::{ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader, VpnPortalConfig},
global_ctx::GlobalCtxEvent,
common::config::{
ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader, VpnPortalConfig,
},
rpc::{PeerInfo, Route},
utils::{list_peer_route_pair, PeerRoutePair},
launcher::{NetworkInstance, NetworkInstanceRunningInfo},
};
use launcher::{EasyTierLauncher, MyNodeInfo};
use serde::{Deserialize, Serialize};
use tauri::{
@@ -41,7 +36,9 @@ impl Default for NetworkingMethod {
struct NetworkConfig {
instance_id: String,
dhcp: bool,
virtual_ipv4: String,
hostname: Option<String>,
network_name: String,
network_secret: String,
networking_method: NetworkingMethod,
@@ -52,7 +49,7 @@ struct NetworkConfig {
proxy_cidrs: Vec<String>,
enable_vpn_portal: bool,
vpn_portal_listne_port: i32,
vpn_portal_listen_port: i32,
vpn_portal_client_network_addr: String,
vpn_portal_client_network_len: i32,
@@ -70,18 +67,20 @@ impl NetworkConfig {
.parse()
.with_context(|| format!("failed to parse instance id: {}", self.instance_id))?,
);
cfg.set_hostname(self.hostname.clone());
cfg.set_dhcp(self.dhcp);
cfg.set_inst_name(self.network_name.clone());
cfg.set_network_identity(NetworkIdentity::new(
self.network_name.clone(),
self.network_secret.clone(),
));
if self.virtual_ipv4.len() > 0 {
cfg.set_ipv4(
self.virtual_ipv4.parse().with_context(|| {
if !self.dhcp {
if self.virtual_ipv4.len() > 0 {
cfg.set_ipv4(Some(self.virtual_ipv4.parse().with_context(|| {
format!("failed to parse ipv4 address: {}", self.virtual_ipv4)
})?,
)
})?))
}
}
match self.networking_method {
@@ -148,12 +147,12 @@ impl NetworkConfig {
client_cidr: cidr
.parse()
.with_context(|| format!("failed to parse vpn portal client cidr: {}", cidr))?,
wireguard_listen: format!("0.0.0.0:{}", self.vpn_portal_listne_port)
wireguard_listen: format!("0.0.0.0:{}", self.vpn_portal_listen_port)
.parse()
.with_context(|| {
format!(
"failed to parse vpn portal wireguard listen port. {}",
self.vpn_portal_listne_port
self.vpn_portal_listen_port
)
})?,
});
@@ -163,102 +162,34 @@ impl NetworkConfig {
}
}
#[derive(Deserialize, Serialize, Debug)]
struct NetworkInstanceRunningInfo {
my_node_info: MyNodeInfo,
events: Vec<(DateTime<Local>, GlobalCtxEvent)>,
node_info: MyNodeInfo,
routes: Vec<Route>,
peers: Vec<PeerInfo>,
peer_route_pairs: Vec<PeerRoutePair>,
running: bool,
error_msg: Option<String>,
}
struct NetworkInstance {
config: TomlConfigLoader,
launcher: Option<EasyTierLauncher>,
}
impl NetworkInstance {
fn new(cfg: NetworkConfig) -> Result<Self, anyhow::Error> {
Ok(Self {
config: cfg.gen_config()?,
launcher: None,
})
}
fn is_easytier_running(&self) -> bool {
self.launcher.is_some() && self.launcher.as_ref().unwrap().running()
}
fn get_running_info(&self) -> Option<NetworkInstanceRunningInfo> {
if self.launcher.is_none() {
return None;
}
let launcher = self.launcher.as_ref().unwrap();
let peers = launcher.get_peers();
let routes = launcher.get_routes();
let peer_route_pairs = list_peer_route_pair(peers.clone(), routes.clone());
Some(NetworkInstanceRunningInfo {
my_node_info: launcher.get_node_info(),
events: launcher.get_events(),
node_info: launcher.get_node_info(),
routes,
peers,
peer_route_pairs,
running: launcher.running(),
error_msg: launcher.error_msg(),
})
}
fn start(&mut self) -> Result<(), anyhow::Error> {
if self.is_easytier_running() {
return Ok(());
}
let mut launcher = EasyTierLauncher::new();
launcher.start(|| Ok(self.config.clone()));
self.launcher = Some(launcher);
Ok(())
}
}
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> =
once_cell::sync::Lazy::new(DashMap::new);
// Learn more about Tauri commands at https://tauri.app/v1/guides/features/command
#[tauri::command]
fn parse_network_config(cfg: &str) -> Result<String, String> {
let cfg: NetworkConfig = serde_json::from_str(cfg).map_err(|e| e.to_string())?;
fn parse_network_config(cfg: NetworkConfig) -> Result<String, String> {
let toml = cfg.gen_config().map_err(|e| e.to_string())?;
Ok(toml.dump())
}
#[tauri::command]
fn run_network_instance(cfg: &str) -> Result<String, String> {
let cfg: NetworkConfig = serde_json::from_str(cfg).map_err(|e| e.to_string())?;
fn run_network_instance(cfg: NetworkConfig) -> Result<(), String> {
if INSTANCE_MAP.contains_key(&cfg.instance_id) {
return Err("instance already exists".to_string());
}
let instance_id = cfg.instance_id.clone();
let mut instance = NetworkInstance::new(cfg).map_err(|e| e.to_string())?;
let cfg = cfg.gen_config().map_err(|e| e.to_string())?;
let mut instance = NetworkInstance::new(cfg);
instance.start().map_err(|e| e.to_string())?;
println!("instance {} started", instance_id);
INSTANCE_MAP.insert(instance_id, instance);
Ok("".to_string())
Ok(())
}
#[tauri::command]
fn retain_network_instance(instance_ids: &str) -> Result<(), String> {
let instance_ids: Vec<String> =
serde_json::from_str(instance_ids).map_err(|e| e.to_string())?;
fn retain_network_instance(instance_ids: Vec<String>) -> Result<(), String> {
let _ = INSTANCE_MAP.retain(|k, _| instance_ids.contains(k));
println!(
"instance {:?} retained",
@@ -271,14 +202,24 @@ fn retain_network_instance(instance_ids: &str) -> Result<(), String> {
}
#[tauri::command]
fn collect_network_infos() -> Result<String, String> {
fn collect_network_infos() -> Result<BTreeMap<String, NetworkInstanceRunningInfo>, String> {
let mut ret = BTreeMap::new();
for instance in INSTANCE_MAP.iter() {
if let Some(info) = instance.get_running_info() {
ret.insert(instance.key().clone(), info);
}
}
Ok(serde_json::to_string(&ret).map_err(|e| e.to_string())?)
Ok(ret)
}
#[tauri::command]
fn get_os_hostname() -> Result<String, String> {
Ok(gethostname::gethostname().to_string_lossy().to_string())
}
#[tauri::command]
fn set_auto_launch_status(app_handle: tauri::AppHandle, enable: bool) -> Result<bool, String> {
Ok(init_launch(&app_handle, enable).map_err(|e| e.to_string())?)
}
fn toggle_window_visibility(window: &Window) {
@@ -302,6 +243,65 @@ fn check_sudo() -> bool {
is_elevated
}
/// init the auto launch
pub fn init_launch(_app_handle: &tauri::AppHandle, enable: bool) -> Result<bool, anyhow::Error> {
let app_exe = current_exe()?;
let app_exe = dunce::canonicalize(app_exe)?;
let app_name = app_exe
.file_stem()
.and_then(|f| f.to_str())
.ok_or(anyhow::anyhow!("failed to get file stem"))?;
let app_path = app_exe
.as_os_str()
.to_str()
.ok_or(anyhow::anyhow!("failed to get app_path"))?
.to_string();
#[cfg(target_os = "windows")]
let app_path = format!("\"{app_path}\"");
// use the /Applications/easytier-gui.app
#[cfg(target_os = "macos")]
let app_path = (|| -> Option<String> {
let path = std::path::PathBuf::from(&app_path);
let path = path.parent()?.parent()?.parent()?;
let extension = path.extension()?.to_str()?;
match extension == "app" {
true => Some(path.as_os_str().to_str()?.to_string()),
false => None,
}
})()
.unwrap_or(app_path);
#[cfg(target_os = "linux")]
let app_path = {
let appimage = _app_handle.env().appimage;
appimage
.and_then(|p| p.to_str().map(|s| s.to_string()))
.unwrap_or(app_path)
};
let auto = AutoLaunchBuilder::new()
.set_app_name(app_name)
.set_app_path(&app_path)
.build()
.with_context(|| "failed to build auto launch")?;
if enable && !auto.is_enabled().unwrap_or(false) {
// 避免重复设置登录项
let _ = auto.disable();
auto.enable()
.with_context(|| "failed to enable auto launch")?
} else if !enable {
let _ = auto.disable();
}
let enabled = auto.is_enabled()?;
Ok(enabled)
}
fn main() {
if !check_sudo() {
process::exit(0);
@@ -318,7 +318,9 @@ fn main() {
parse_network_config,
run_network_instance,
retain_network_instance,
collect_network_infos
collect_network_infos,
get_os_hostname,
set_auto_launch_status
])
.system_tray(SystemTray::new().with_menu(tray_menu))
.on_system_tray_event(|app, event| match event {

View File

@@ -1,7 +1,7 @@
{
"build": {
"beforeDevCommand": "yarn dev",
"beforeBuildCommand": "yarn build",
"beforeDevCommand": "pnpm dev",
"beforeBuildCommand": "pnpm build",
"devPath": "http://localhost:1420",
"distDir": "../dist"
},
@@ -46,4 +46,4 @@
]
}
}
}
}

View File

@@ -1,258 +1,3 @@
<script setup lang="ts">
import { ref, onMounted, onUnmounted, computed } from 'vue'
import Stepper from 'primevue/stepper';
import StepperPanel from 'primevue/stepperpanel';
import { useToast } from "primevue/usetoast";
import {
i18n, loadLocaleFromLocalStorage, NetworkConfig, parseNetworkConfig,
useNetworkStore, runNetworkInstance, retainNetworkInstance, collectNetworkInfos,
changeLocale
} from './main';
import Config from './components/Config.vue';
import Status from './components/Status.vue';
import { exit } from '@tauri-apps/api/process';
const visible = ref(false);
const tomlConfig = ref("");
const items = ref([
{
label: () => i18n.global.t('show_config'),
icon: 'pi pi-file-edit',
command: async () => {
try {
const ret = await parseNetworkConfig(networkStore.curNetwork);
tomlConfig.value = ret;
} catch (e: any) {
tomlConfig.value = e;
}
visible.value = true;
}
},
{
label: () => i18n.global.t('del_cur_network'),
icon: 'pi pi-times',
command: async () => {
networkStore.removeNetworkInstance(networkStore.curNetwork.instance_id);
await retainNetworkInstance(networkStore.networkInstanceIds);
networkStore.delCurNetwork();
},
disabled: () => networkStore.networkList.length <= 1,
},
])
enum Severity {
None = "none",
Success = "success",
Info = "info",
Warn = "warn",
Error = "error",
}
const messageBarSeverity = ref(Severity.None);
const messageBarContent = ref("");
const toast = useToast();
const networkStore = useNetworkStore();
const addNewNetwork = () => {
networkStore.addNewNetwork();
networkStore.curNetwork = networkStore.lastNetwork;
}
const networkMenuName = (network: NetworkConfig) => {
return network.network_name + " (" + network.instance_id + ")";
}
networkStore.$subscribe(async () => {
networkStore.saveToLocalStroage();
try {
await parseNetworkConfig(networkStore.curNetwork);
messageBarSeverity.value = Severity.None;
} catch (e: any) {
messageBarContent.value = e;
messageBarSeverity.value = Severity.Error;
}
});
async function runNetworkCb(cfg: NetworkConfig, cb: (e: MouseEvent) => void) {
cb({} as MouseEvent);
networkStore.removeNetworkInstance(cfg.instance_id);
await retainNetworkInstance(networkStore.networkInstanceIds);
networkStore.addNetworkInstance(cfg.instance_id);
try {
await runNetworkInstance(cfg);
} catch (e: any) {
console.error(e);
toast.add({ severity: 'info', detail: e });
}
}
async function stopNetworkCb(cfg: NetworkConfig, cb: (e: MouseEvent) => void) {
console.log("stopNetworkCb", cfg, cb);
cb({} as MouseEvent);
networkStore.removeNetworkInstance(cfg.instance_id);
await retainNetworkInstance(networkStore.networkInstanceIds);
}
async function updateNetworkInfos() {
networkStore.updateWithNetworkInfos(await collectNetworkInfos());
}
let intervalId = 0;
onMounted(() => {
intervalId = setInterval(async () => {
await updateNetworkInfos();
}, 500);
});
onUnmounted(() => clearInterval(intervalId))
const curNetworkHasInstance = computed(() => {
return networkStore.networkInstanceIds.includes(networkStore.curNetworkId);
});
const activeStep = computed(() => {
return curNetworkHasInstance.value ? 1 : 0;
});
const setting_menu = ref();
const setting_menu_items = ref([
{
label: () => i18n.global.t('settings'),
items: [
{
label: () => i18n.global.t('exchange_language'),
icon: 'pi pi-refresh',
command: () => {
changeLocale((i18n.global.locale.value === 'en' ? 'cn' : 'en'));
}
},
{
label: () => i18n.global.t('exit'),
icon: 'pi pi-times',
command: async () => {
await exit(1);
}
}
]
}
]);
const toggle_setting_menu = (event: any) => {
setting_menu.value.toggle(event);
};
onMounted(async () => {
networkStore.loadFromLocalStorage();
changeLocale(loadLocaleFromLocalStorage());
});
</script>
<template>
<!-- <n-config-provider :theme="lightTheme"> -->
<div id="root" class="flex flex-column">
<Dialog v-model:visible="visible" modal header="Config File" :style="{ width: '70%' }">
<Panel>
<ScrollPanel style="width: 100%; height: 300px">
<pre>{{ tomlConfig }}</pre>
</ScrollPanel>
</Panel>
<Divider />
<div class="flex justify-content-end gap-2">
<Button type="button" :label="$t('close')" @click="visible = false"></Button>
</div>
</Dialog>
<div>
<Toolbar>
<template #start>
<div class="flex align-items-center gap-2">
<Button icon="pi pi-plus" class="mr-2" severity="primary" :label="$t('add_new_network')"
@click="addNewNetwork" />
</div>
</template>
<template #center>
<div class="min-w-80 mr-20">
<Dropdown v-model="networkStore.curNetwork" :options="networkStore.networkList"
:optionLabel="networkMenuName" :placeholder="$t('select_network')" :highlightOnSelect="true"
:checkmark="true" class="w-full md:w-32rem" />
</div>
</template>
<template #end>
<Button icon="pi pi-cog" class="mr-2" severity="secondary" aria-haspopup="true" @click="toggle_setting_menu"
:label="$t('settings')" aria-controls="overlay_setting_menu" />
<Menu ref="setting_menu" id="overlay_setting_menu" :model="setting_menu_items" :popup="true" />
</template>
</Toolbar>
</div>
<Stepper class="h-full overflow-y-auto" :active-step="activeStep">
<StepperPanel :header="$t('config_network')" class="w">
<template #content="{ nextCallback }">
<Config @run-network="runNetworkCb($event, nextCallback)" :instance-id="networkStore.curNetworkId"
:config-invalid="messageBarSeverity != Severity.None" />
</template>
</StepperPanel>
<StepperPanel :header="$t('running')">
<template #content="{ prevCallback }">
<div class="flex flex-column">
<Status :instance-id="networkStore.curNetworkId" />
</div>
<div class="flex pt-4 justify-content-center">
<Button :label="$t('stop_network')" severity="danger" icon="pi pi-arrow-left"
@click="stopNetworkCb(networkStore.curNetwork, prevCallback)" />
</div>
</template>
</StepperPanel>
</Stepper>
<div>
<Menubar :model="items" breakpoint="300px">
</Menubar>
<InlineMessage v-if="messageBarSeverity !== Severity.None" class="absolute bottom-0 right-0" severity="error">
{{ messageBarContent }}</InlineMessage>
</div>
</div>
<RouterView />
</template>
<style scoped>
#root {
height: 100vh;
width: 100vw;
}
</style>
<style>
body {
height: 100vh;
width: 100vw;
padding: 0;
margin: 0;
overflow: hidden;
}
.p-menubar .p-menuitem {
margin: 0;
}
/*
.p-tabview-panel {
height: 100%;
} */
</style>
<script lang="ts">
</script>

263
easytier-gui/src/auto-imports.d.ts vendored Normal file
View File

@@ -0,0 +1,263 @@
/* eslint-disable */
/* prettier-ignore */
// @ts-nocheck
// noinspection JSUnusedGlobalSymbols
// Generated by unplugin-auto-import
export {}
declare global {
const EffectScope: typeof import('vue')['EffectScope']
const acceptHMRUpdate: typeof import('pinia')['acceptHMRUpdate']
const collectNetworkInfos: typeof import('./composables/network')['collectNetworkInfos']
const computed: typeof import('vue')['computed']
const createApp: typeof import('vue')['createApp']
const createPinia: typeof import('pinia')['createPinia']
const customRef: typeof import('vue')['customRef']
const defineAsyncComponent: typeof import('vue')['defineAsyncComponent']
const defineComponent: typeof import('vue')['defineComponent']
const definePage: typeof import('unplugin-vue-router/runtime')['definePage']
const defineStore: typeof import('pinia')['defineStore']
const effectScope: typeof import('vue')['effectScope']
const getActivePinia: typeof import('pinia')['getActivePinia']
const getCurrentInstance: typeof import('vue')['getCurrentInstance']
const getCurrentScope: typeof import('vue')['getCurrentScope']
const getOsHostname: typeof import('./composables/network')['getOsHostname']
const h: typeof import('vue')['h']
const inject: typeof import('vue')['inject']
const isProxy: typeof import('vue')['isProxy']
const isReactive: typeof import('vue')['isReactive']
const isReadonly: typeof import('vue')['isReadonly']
const isRef: typeof import('vue')['isRef']
const loadRunningInstanceIdsFromLocalStorage: typeof import('./stores/network')['loadRunningInstanceIdsFromLocalStorage']
const mapActions: typeof import('pinia')['mapActions']
const mapGetters: typeof import('pinia')['mapGetters']
const mapState: typeof import('pinia')['mapState']
const mapStores: typeof import('pinia')['mapStores']
const mapWritableState: typeof import('pinia')['mapWritableState']
const markRaw: typeof import('vue')['markRaw']
const nextTick: typeof import('vue')['nextTick']
const onActivated: typeof import('vue')['onActivated']
const onBeforeMount: typeof import('vue')['onBeforeMount']
const onBeforeRouteLeave: typeof import('vue-router/auto')['onBeforeRouteLeave']
const onBeforeRouteUpdate: typeof import('vue-router/auto')['onBeforeRouteUpdate']
const onBeforeUnmount: typeof import('vue')['onBeforeUnmount']
const onBeforeUpdate: typeof import('vue')['onBeforeUpdate']
const onDeactivated: typeof import('vue')['onDeactivated']
const onErrorCaptured: typeof import('vue')['onErrorCaptured']
const onMounted: typeof import('vue')['onMounted']
const onRenderTracked: typeof import('vue')['onRenderTracked']
const onRenderTriggered: typeof import('vue')['onRenderTriggered']
const onScopeDispose: typeof import('vue')['onScopeDispose']
const onServerPrefetch: typeof import('vue')['onServerPrefetch']
const onUnmounted: typeof import('vue')['onUnmounted']
const onUpdated: typeof import('vue')['onUpdated']
const parseNetworkConfig: typeof import('./composables/network')['parseNetworkConfig']
const provide: typeof import('vue')['provide']
const reactive: typeof import('vue')['reactive']
const readonly: typeof import('vue')['readonly']
const ref: typeof import('vue')['ref']
const resolveComponent: typeof import('vue')['resolveComponent']
const retainNetworkInstance: typeof import('./composables/network')['retainNetworkInstance']
const runNetworkInstance: typeof import('./composables/network')['runNetworkInstance']
const setActivePinia: typeof import('pinia')['setActivePinia']
const setAutoLaunchStatus: typeof import('./composables/network')['setAutoLaunchStatus']
const setMapStoreSuffix: typeof import('pinia')['setMapStoreSuffix']
const shallowReactive: typeof import('vue')['shallowReactive']
const shallowReadonly: typeof import('vue')['shallowReadonly']
const shallowRef: typeof import('vue')['shallowRef']
const storeToRefs: typeof import('pinia')['storeToRefs']
const toRaw: typeof import('vue')['toRaw']
const toRef: typeof import('vue')['toRef']
const toRefs: typeof import('vue')['toRefs']
const toValue: typeof import('vue')['toValue']
const triggerRef: typeof import('vue')['triggerRef']
const unref: typeof import('vue')['unref']
const useAttrs: typeof import('vue')['useAttrs']
const useCssModule: typeof import('vue')['useCssModule']
const useCssVars: typeof import('vue')['useCssVars']
const useI18n: typeof import('vue-i18n')['useI18n']
const useLink: typeof import('vue-router/auto')['useLink']
const useNetworkStore: typeof import('./stores/network')['useNetworkStore']
const useRoute: typeof import('vue-router/auto')['useRoute']
const useRouter: typeof import('vue-router/auto')['useRouter']
const useSlots: typeof import('vue')['useSlots']
const watch: typeof import('vue')['watch']
const watchEffect: typeof import('vue')['watchEffect']
const watchPostEffect: typeof import('vue')['watchPostEffect']
const watchSyncEffect: typeof import('vue')['watchSyncEffect']
}
// for type re-export
declare global {
// @ts-ignore
export type { Component, ComponentPublicInstance, ComputedRef, ExtractDefaultPropTypes, ExtractPropTypes, ExtractPublicPropTypes, InjectionKey, PropType, Ref, VNode, WritableComputedRef } from 'vue'
import('vue')
}
// for vue template auto import
import { UnwrapRef } from 'vue'
declare module 'vue' {
interface GlobalComponents {}
interface ComponentCustomProperties {
readonly EffectScope: UnwrapRef<typeof import('vue')['EffectScope']>
readonly acceptHMRUpdate: UnwrapRef<typeof import('pinia')['acceptHMRUpdate']>
readonly collectNetworkInfos: UnwrapRef<typeof import('./composables/network')['collectNetworkInfos']>
readonly computed: UnwrapRef<typeof import('vue')['computed']>
readonly createApp: UnwrapRef<typeof import('vue')['createApp']>
readonly createPinia: UnwrapRef<typeof import('pinia')['createPinia']>
readonly customRef: UnwrapRef<typeof import('vue')['customRef']>
readonly defineAsyncComponent: UnwrapRef<typeof import('vue')['defineAsyncComponent']>
readonly defineComponent: UnwrapRef<typeof import('vue')['defineComponent']>
readonly definePage: UnwrapRef<typeof import('unplugin-vue-router/runtime')['definePage']>
readonly defineStore: UnwrapRef<typeof import('pinia')['defineStore']>
readonly effectScope: UnwrapRef<typeof import('vue')['effectScope']>
readonly getActivePinia: UnwrapRef<typeof import('pinia')['getActivePinia']>
readonly getCurrentInstance: UnwrapRef<typeof import('vue')['getCurrentInstance']>
readonly getCurrentScope: UnwrapRef<typeof import('vue')['getCurrentScope']>
readonly getOsHostname: UnwrapRef<typeof import('./composables/network')['getOsHostname']>
readonly h: UnwrapRef<typeof import('vue')['h']>
readonly inject: UnwrapRef<typeof import('vue')['inject']>
readonly isProxy: UnwrapRef<typeof import('vue')['isProxy']>
readonly isReactive: UnwrapRef<typeof import('vue')['isReactive']>
readonly isReadonly: UnwrapRef<typeof import('vue')['isReadonly']>
readonly isRef: UnwrapRef<typeof import('vue')['isRef']>
readonly loadRunningInstanceIdsFromLocalStorage: UnwrapRef<typeof import('./stores/network')['loadRunningInstanceIdsFromLocalStorage']>
readonly mapActions: UnwrapRef<typeof import('pinia')['mapActions']>
readonly mapGetters: UnwrapRef<typeof import('pinia')['mapGetters']>
readonly mapState: UnwrapRef<typeof import('pinia')['mapState']>
readonly mapStores: UnwrapRef<typeof import('pinia')['mapStores']>
readonly mapWritableState: UnwrapRef<typeof import('pinia')['mapWritableState']>
readonly markRaw: UnwrapRef<typeof import('vue')['markRaw']>
readonly nextTick: UnwrapRef<typeof import('vue')['nextTick']>
readonly onActivated: UnwrapRef<typeof import('vue')['onActivated']>
readonly onBeforeMount: UnwrapRef<typeof import('vue')['onBeforeMount']>
readonly onBeforeRouteLeave: UnwrapRef<typeof import('vue-router/auto')['onBeforeRouteLeave']>
readonly onBeforeRouteUpdate: UnwrapRef<typeof import('vue-router/auto')['onBeforeRouteUpdate']>
readonly onBeforeUnmount: UnwrapRef<typeof import('vue')['onBeforeUnmount']>
readonly onBeforeUpdate: UnwrapRef<typeof import('vue')['onBeforeUpdate']>
readonly onDeactivated: UnwrapRef<typeof import('vue')['onDeactivated']>
readonly onErrorCaptured: UnwrapRef<typeof import('vue')['onErrorCaptured']>
readonly onMounted: UnwrapRef<typeof import('vue')['onMounted']>
readonly onRenderTracked: UnwrapRef<typeof import('vue')['onRenderTracked']>
readonly onRenderTriggered: UnwrapRef<typeof import('vue')['onRenderTriggered']>
readonly onScopeDispose: UnwrapRef<typeof import('vue')['onScopeDispose']>
readonly onServerPrefetch: UnwrapRef<typeof import('vue')['onServerPrefetch']>
readonly onUnmounted: UnwrapRef<typeof import('vue')['onUnmounted']>
readonly onUpdated: UnwrapRef<typeof import('vue')['onUpdated']>
readonly parseNetworkConfig: UnwrapRef<typeof import('./composables/network')['parseNetworkConfig']>
readonly provide: UnwrapRef<typeof import('vue')['provide']>
readonly reactive: UnwrapRef<typeof import('vue')['reactive']>
readonly readonly: UnwrapRef<typeof import('vue')['readonly']>
readonly ref: UnwrapRef<typeof import('vue')['ref']>
readonly resolveComponent: UnwrapRef<typeof import('vue')['resolveComponent']>
readonly retainNetworkInstance: UnwrapRef<typeof import('./composables/network')['retainNetworkInstance']>
readonly runNetworkInstance: UnwrapRef<typeof import('./composables/network')['runNetworkInstance']>
readonly setActivePinia: UnwrapRef<typeof import('pinia')['setActivePinia']>
readonly setAutoLaunchStatus: UnwrapRef<typeof import('./composables/network')['setAutoLaunchStatus']>
readonly setMapStoreSuffix: UnwrapRef<typeof import('pinia')['setMapStoreSuffix']>
readonly shallowReactive: UnwrapRef<typeof import('vue')['shallowReactive']>
readonly shallowReadonly: UnwrapRef<typeof import('vue')['shallowReadonly']>
readonly shallowRef: UnwrapRef<typeof import('vue')['shallowRef']>
readonly storeToRefs: UnwrapRef<typeof import('pinia')['storeToRefs']>
readonly toRaw: UnwrapRef<typeof import('vue')['toRaw']>
readonly toRef: UnwrapRef<typeof import('vue')['toRef']>
readonly toRefs: UnwrapRef<typeof import('vue')['toRefs']>
readonly toValue: UnwrapRef<typeof import('vue')['toValue']>
readonly triggerRef: UnwrapRef<typeof import('vue')['triggerRef']>
readonly unref: UnwrapRef<typeof import('vue')['unref']>
readonly useAttrs: UnwrapRef<typeof import('vue')['useAttrs']>
readonly useCssModule: UnwrapRef<typeof import('vue')['useCssModule']>
readonly useCssVars: UnwrapRef<typeof import('vue')['useCssVars']>
readonly useI18n: UnwrapRef<typeof import('vue-i18n')['useI18n']>
readonly useLink: UnwrapRef<typeof import('vue-router/auto')['useLink']>
readonly useNetworkStore: UnwrapRef<typeof import('./stores/network')['useNetworkStore']>
readonly useRoute: UnwrapRef<typeof import('vue-router/auto')['useRoute']>
readonly useRouter: UnwrapRef<typeof import('vue-router/auto')['useRouter']>
readonly useSlots: UnwrapRef<typeof import('vue')['useSlots']>
readonly watch: UnwrapRef<typeof import('vue')['watch']>
readonly watchEffect: UnwrapRef<typeof import('vue')['watchEffect']>
readonly watchPostEffect: UnwrapRef<typeof import('vue')['watchPostEffect']>
readonly watchSyncEffect: UnwrapRef<typeof import('vue')['watchSyncEffect']>
}
}
declare module '@vue/runtime-core' {
interface GlobalComponents {}
interface ComponentCustomProperties {
readonly EffectScope: UnwrapRef<typeof import('vue')['EffectScope']>
readonly acceptHMRUpdate: UnwrapRef<typeof import('pinia')['acceptHMRUpdate']>
readonly collectNetworkInfos: UnwrapRef<typeof import('./composables/network')['collectNetworkInfos']>
readonly computed: UnwrapRef<typeof import('vue')['computed']>
readonly createApp: UnwrapRef<typeof import('vue')['createApp']>
readonly createPinia: UnwrapRef<typeof import('pinia')['createPinia']>
readonly customRef: UnwrapRef<typeof import('vue')['customRef']>
readonly defineAsyncComponent: UnwrapRef<typeof import('vue')['defineAsyncComponent']>
readonly defineComponent: UnwrapRef<typeof import('vue')['defineComponent']>
readonly definePage: UnwrapRef<typeof import('unplugin-vue-router/runtime')['definePage']>
readonly defineStore: UnwrapRef<typeof import('pinia')['defineStore']>
readonly effectScope: UnwrapRef<typeof import('vue')['effectScope']>
readonly getActivePinia: UnwrapRef<typeof import('pinia')['getActivePinia']>
readonly getCurrentInstance: UnwrapRef<typeof import('vue')['getCurrentInstance']>
readonly getCurrentScope: UnwrapRef<typeof import('vue')['getCurrentScope']>
readonly getOsHostname: UnwrapRef<typeof import('./composables/network')['getOsHostname']>
readonly h: UnwrapRef<typeof import('vue')['h']>
readonly inject: UnwrapRef<typeof import('vue')['inject']>
readonly isProxy: UnwrapRef<typeof import('vue')['isProxy']>
readonly isReactive: UnwrapRef<typeof import('vue')['isReactive']>
readonly isReadonly: UnwrapRef<typeof import('vue')['isReadonly']>
readonly isRef: UnwrapRef<typeof import('vue')['isRef']>
readonly loadRunningInstanceIdsFromLocalStorage: UnwrapRef<typeof import('./stores/network')['loadRunningInstanceIdsFromLocalStorage']>
readonly mapActions: UnwrapRef<typeof import('pinia')['mapActions']>
readonly mapGetters: UnwrapRef<typeof import('pinia')['mapGetters']>
readonly mapState: UnwrapRef<typeof import('pinia')['mapState']>
readonly mapStores: UnwrapRef<typeof import('pinia')['mapStores']>
readonly mapWritableState: UnwrapRef<typeof import('pinia')['mapWritableState']>
readonly markRaw: UnwrapRef<typeof import('vue')['markRaw']>
readonly nextTick: UnwrapRef<typeof import('vue')['nextTick']>
readonly onActivated: UnwrapRef<typeof import('vue')['onActivated']>
readonly onBeforeMount: UnwrapRef<typeof import('vue')['onBeforeMount']>
readonly onBeforeRouteLeave: UnwrapRef<typeof import('vue-router/auto')['onBeforeRouteLeave']>
readonly onBeforeRouteUpdate: UnwrapRef<typeof import('vue-router/auto')['onBeforeRouteUpdate']>
readonly onBeforeUnmount: UnwrapRef<typeof import('vue')['onBeforeUnmount']>
readonly onBeforeUpdate: UnwrapRef<typeof import('vue')['onBeforeUpdate']>
readonly onDeactivated: UnwrapRef<typeof import('vue')['onDeactivated']>
readonly onErrorCaptured: UnwrapRef<typeof import('vue')['onErrorCaptured']>
readonly onMounted: UnwrapRef<typeof import('vue')['onMounted']>
readonly onRenderTracked: UnwrapRef<typeof import('vue')['onRenderTracked']>
readonly onRenderTriggered: UnwrapRef<typeof import('vue')['onRenderTriggered']>
readonly onScopeDispose: UnwrapRef<typeof import('vue')['onScopeDispose']>
readonly onServerPrefetch: UnwrapRef<typeof import('vue')['onServerPrefetch']>
readonly onUnmounted: UnwrapRef<typeof import('vue')['onUnmounted']>
readonly onUpdated: UnwrapRef<typeof import('vue')['onUpdated']>
readonly parseNetworkConfig: UnwrapRef<typeof import('./composables/network')['parseNetworkConfig']>
readonly provide: UnwrapRef<typeof import('vue')['provide']>
readonly reactive: UnwrapRef<typeof import('vue')['reactive']>
readonly readonly: UnwrapRef<typeof import('vue')['readonly']>
readonly ref: UnwrapRef<typeof import('vue')['ref']>
readonly resolveComponent: UnwrapRef<typeof import('vue')['resolveComponent']>
readonly retainNetworkInstance: UnwrapRef<typeof import('./composables/network')['retainNetworkInstance']>
readonly runNetworkInstance: UnwrapRef<typeof import('./composables/network')['runNetworkInstance']>
readonly setActivePinia: UnwrapRef<typeof import('pinia')['setActivePinia']>
readonly setAutoLaunchStatus: UnwrapRef<typeof import('./composables/network')['setAutoLaunchStatus']>
readonly setMapStoreSuffix: UnwrapRef<typeof import('pinia')['setMapStoreSuffix']>
readonly shallowReactive: UnwrapRef<typeof import('vue')['shallowReactive']>
readonly shallowReadonly: UnwrapRef<typeof import('vue')['shallowReadonly']>
readonly shallowRef: UnwrapRef<typeof import('vue')['shallowRef']>
readonly storeToRefs: UnwrapRef<typeof import('pinia')['storeToRefs']>
readonly toRaw: UnwrapRef<typeof import('vue')['toRaw']>
readonly toRef: UnwrapRef<typeof import('vue')['toRef']>
readonly toRefs: UnwrapRef<typeof import('vue')['toRefs']>
readonly toValue: UnwrapRef<typeof import('vue')['toValue']>
readonly triggerRef: UnwrapRef<typeof import('vue')['triggerRef']>
readonly unref: UnwrapRef<typeof import('vue')['unref']>
readonly useAttrs: UnwrapRef<typeof import('vue')['useAttrs']>
readonly useCssModule: UnwrapRef<typeof import('vue')['useCssModule']>
readonly useCssVars: UnwrapRef<typeof import('vue')['useCssVars']>
readonly useI18n: UnwrapRef<typeof import('vue-i18n')['useI18n']>
readonly useLink: UnwrapRef<typeof import('vue-router/auto')['useLink']>
readonly useNetworkStore: UnwrapRef<typeof import('./stores/network')['useNetworkStore']>
readonly useRoute: UnwrapRef<typeof import('vue-router/auto')['useRoute']>
readonly useRouter: UnwrapRef<typeof import('vue-router/auto')['useRouter']>
readonly useSlots: UnwrapRef<typeof import('vue')['useSlots']>
readonly watch: UnwrapRef<typeof import('vue')['watch']>
readonly watchEffect: UnwrapRef<typeof import('vue')['watchEffect']>
readonly watchPostEffect: UnwrapRef<typeof import('vue')['watchPostEffect']>
readonly watchSyncEffect: UnwrapRef<typeof import('vue')['watchSyncEffect']>
}
}

View File

@@ -1,55 +1,84 @@
<script setup lang="ts">
import InputGroup from "primevue/inputgroup";
import InputGroupAddon from "primevue/inputgroupaddon";
import { ref, defineProps, computed } from "vue";
import { i18n, useNetworkStore, NetworkingMethod } from "../main";
const networking_methods = ref([
{ value: NetworkingMethod.PublicServer, label: i18n.global.t('public_server') },
{ value: NetworkingMethod.Manual, label: i18n.global.t('manual') },
{ value: NetworkingMethod.Standalone, label: i18n.global.t('standalone') },
]);
import InputGroup from 'primevue/inputgroup'
import InputGroupAddon from 'primevue/inputgroupaddon'
import { getOsHostname } from '~/composables/network'
import { NetworkingMethod } from '~/types/network'
const { t } = useI18n()
const props = defineProps<{
configInvalid?: boolean,
instanceId?: string,
configInvalid?: boolean
instanceId?: string
}>()
defineEmits(["runNetwork"]);
defineEmits(['runNetwork'])
const networkStore = useNetworkStore();
const networking_methods = ref([
{ value: NetworkingMethod.PublicServer, label: t('public_server') },
{ value: NetworkingMethod.Manual, label: t('manual') },
{ value: NetworkingMethod.Standalone, label: t('standalone') },
])
const networkStore = useNetworkStore()
const curNetwork = computed(() => {
if (props.instanceId) {
console.log("instanceId", props.instanceId);
const c = networkStore.networkList.find(n => n.instance_id == props.instanceId);
if (c != undefined) {
return c;
}
// console.log('instanceId', props.instanceId)
const c = networkStore.networkList.find(n => n.instance_id === props.instanceId)
if (c !== undefined)
return c
}
return networkStore.curNetwork;
});
return networkStore.curNetwork
})
const presetPublicServers = [
"tcp://easytier.public.kkrainbow.top:11010",
];
'tcp://easytier.public.kkrainbow.top:11010',
]
function validateHostname() {
if (curNetwork.value.hostname) {
// eslint no-useless-escape
let name = curNetwork.value.hostname!.replaceAll(/[^\u4E00-\u9FA5a-zA-Z0-9\-]*/g, '')
if (name.length > 32)
name = name.substring(0, 32)
if (curNetwork.value.hostname !== name)
curNetwork.value.hostname = name
}
}
const osHostname = ref<string>('')
onMounted(async () => {
osHostname.value = await getOsHostname()
})
</script>
<template>
<div class="flex flex-column h-full">
<div class="flex flex-column">
<div class="w-10/12 max-w-fit self-center ">
<Panel header="Basic Settings">
<div class="w-7/12 self-center ">
<Message severity="warn">
{{ t('dhcp_experimental_warning') }}
</Message>
</div>
<div class="w-7/12 self-center ">
<Panel :header="t('basic_settings')">
<div class="flex flex-column gap-y-2">
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-column gap-2 basis-5/12 grow">
<label for="virtual_ip">{{ $t('virtual_ipv4') }}</label>
<div class="flex align-items-center" for="virtual_ip">
<label class="mr-2"> {{ t('virtual_ipv4') }} </label>
<Checkbox v-model="curNetwork.dhcp" input-id="virtual_ip_auto" :binary="true" />
<label for="virtual_ip_auto" class="ml-2">
{{ t('virtual_ipv4_dhcp') }}
</label>
</div>
<InputGroup>
<InputText id="virtual_ip" v-model="curNetwork.virtual_ipv4" aria-describedby="virtual_ipv4-help" />
<InputText
id="virtual_ip" v-model="curNetwork.virtual_ipv4" :disabled="curNetwork.dhcp"
aria-describedby="virtual_ipv4-help"
/>
<InputGroupAddon>
<span>/24</span>
</InputGroupAddon>
@@ -59,38 +88,64 @@ const presetPublicServers = [
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-column gap-2 basis-5/12 grow">
<label for="network_name">{{ $t('network_name') }}</label>
<label for="network_name">{{ t('network_name') }}</label>
<InputText id="network_name" v-model="curNetwork.network_name" aria-describedby="network_name-help" />
</div>
<div class="flex flex-column gap-2 basis-5/12 grow">
<label for="network_secret">{{ $t('network_secret') }}</label>
<InputText id="network_secret" v-model="curNetwork.network_secret"
aria-describedby=" network_secret-help" />
<label for="network_secret">{{ t('network_secret') }}</label>
<InputText
id="network_secret" v-model="curNetwork.network_secret"
aria-describedby=" network_secret-help"
/>
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-column gap-2 basis-5/12 grow">
<label for="nm">{{ $t('networking_method') }}</label>
<label for="nm">{{ t('networking_method') }}</label>
<div class="items-center flex flex-row p-fluid gap-x-1">
<Dropdown v-model="curNetwork.networking_method" :options="networking_methods" optionLabel="label"
optionValue="value" placeholder="Select Method" class="" />
<Chips id="chips" v-model="curNetwork.peer_urls"
:placeholder="$t('chips_placeholder', ['tcp://8.8.8.8:11010'])" separator=" " class="grow"
v-if="curNetwork.networking_method == NetworkingMethod.Manual" />
<Dropdown
v-model="curNetwork.networking_method" :options="networking_methods" option-label="label"
option-value="value" placeholder="Select Method" class=""
/>
<Chips
v-if="curNetwork.networking_method === NetworkingMethod.Manual" id="chips"
v-model="curNetwork.peer_urls" :placeholder="t('chips_placeholder', ['tcp://8.8.8.8:11010'])"
separator=" " class="grow"
/>
<Dropdown :editable="true" v-model="curNetwork.public_server_url" class="grow"
<Dropdown
v-if="curNetwork.networking_method === NetworkingMethod.PublicServer"
v-model="curNetwork.public_server_url" :editable="true" class="grow"
:options="presetPublicServers"
v-if="curNetwork.networking_method == NetworkingMethod.PublicServer" />
/>
</div>
</div>
</div>
</div>
</Panel>
<Divider />
<Panel :header="t('advanced_settings')" toggleable collapsed>
<div class="flex flex-column gap-y-2">
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-column gap-2 basis-5/12 grow">
<label for="hostname">{{ t('hostname') }}</label>
<InputText
id="hostname" v-model="curNetwork.hostname" aria-describedby="hostname-help" :format="true"
:placeholder="t('hostname_placeholder', [osHostname])" @blur="validateHostname"
/>
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap w-full">
<div class="flex flex-column gap-2 grow p-fluid">
<label for="username">{{ $t('proxy_cidrs') }}</label>
<Chips id="chips" v-model="curNetwork.proxy_cidrs"
:placeholder="$t('chips_placeholder', ['10.0.0.0/24'])" separator=" " class="w-full" />
<label for="username">{{ t('proxy_cidrs') }}</label>
<Chips
id="chips" v-model="curNetwork.proxy_cidrs"
:placeholder="t('chips_placeholder', ['10.0.0.0/24'])" separator=" " class="w-full"
/>
</div>
</div>
@@ -98,55 +153,56 @@ const presetPublicServers = [
<div class="flex flex-column gap-2 grow">
<label for="username">VPN Portal</label>
<div class="items-center flex flex-row gap-x-4">
<ToggleButton onIcon="pi pi-check" offIcon="pi pi-times" v-model="curNetwork.enable_vpn_portal"
:onLabel="$t('off_text')" :offLabel="$t('on_text')" />
<div class="grow" v-if="curNetwork.enable_vpn_portal">
<ToggleButton
v-model="curNetwork.enable_vpn_portal" on-icon="pi pi-check" off-icon="pi pi-times"
:on-label="t('off_text')" :off-label="t('on_text')"
/>
<div v-if="curNetwork.enable_vpn_portal" class="grow">
<InputGroup>
<InputText :placeholder="$t('vpn_portal_client_network')"
v-model="curNetwork.vpn_portal_client_network_addr" />
<InputText
v-model="curNetwork.vpn_portal_client_network_addr"
:placeholder="t('vpn_portal_client_network')"
/>
<InputGroupAddon>
<span>/{{ curNetwork.vpn_portal_client_network_len }}</span>
</InputGroupAddon>
</InputGroup>
</div>
<InputNumber :placeholder="$t('vpn_portal_listen_port')" class="" v-if="curNetwork.enable_vpn_portal"
:format="false" v-model="curNetwork.vpn_portal_listne_port" :min="0" :max="65535" />
<InputNumber
v-if="curNetwork.enable_vpn_portal" v-model="curNetwork.vpn_portal_listen_port"
:placeholder="t('vpn_portal_listen_port')" class="" :format="false" :min="0" :max="65535"
/>
</div>
</div>
</div>
</div>
</Panel>
<Divider />
<Panel :header="$t('advanced_settings')" toggleable>
<div class="flex flex-column gap-y-2">
<div class="flex flex-row gap-x-9 flex-wrap w-full">
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-column gap-2 grow p-fluid">
<label for="listener_urls">{{ $t('listener_urls') }}</label>
<Chips id="listener_urls" v-model="curNetwork.listener_urls"
:placeholder="$t('chips_placeholder', ['tcp://1.1.1.1:11010'])" separator=" " class="w-full" />
<label for="listener_urls">{{ t('listener_urls') }}</label>
<Chips
id="listener_urls" v-model="curNetwork.listener_urls"
:placeholder="t('chips_placeholder', ['tcp://1.1.1.1:11010'])" separator=" " class="w-full"
/>
</div>
</div>
<div class="flex flex-row gap-x-9 flex-wrap">
<div class="flex flex-column gap-2 basis-5/12 grow">
<label for="rpc_port">{{ $t('rpc_port') }}</label>
<InputNumber id="rpc_port" v-model="curNetwork.rpc_port" aria-describedby="username-help"
:format="false" :min="0" :max="65535" />
<label for="rpc_port">{{ t('rpc_port') }}</label>
<InputNumber
id="rpc_port" v-model="curNetwork.rpc_port" aria-describedby="username-help"
:format="false" :min="0" :max="65535"
/>
</div>
</div>
</div>
</Panel>
<Divider />
<div class="flex pt-4 justify-content-center">
<Button :label="$t('run_network')" icon="pi pi-arrow-right" iconPos="right" @click="$emit('runNetwork', curNetwork)"
:disabled="configInvalid" />
<Button
:label="t('run_network')" icon="pi pi-arrow-right" icon-pos="right" :disabled="configInvalid"
@click="$emit('runNetwork', curNetwork)"
/>
</div>
</div>
</div>

View File

@@ -1,358 +1,381 @@
<script setup lang="ts">
import { ref, computed, onMounted, onUnmounted } from 'vue'
import { useNetworkStore } from '../main';
const networkStore = useNetworkStore();
import type { NodeInfo } from '~/types/network'
const { t } = useI18n()
const props = defineProps<{
instanceId?: string,
instanceId?: string
}>()
const networkStore = useNetworkStore()
const curNetwork = computed(() => {
if (props.instanceId) {
console.log("instanceId", props.instanceId);
const c = networkStore.networkList.find(n => n.instance_id == props.instanceId);
if (c != undefined) {
return c;
}
}
if (props.instanceId) {
// console.log('instanceId', props.instanceId)
const c = networkStore.networkList.find(n => n.instance_id === props.instanceId)
if (c !== undefined)
return c
}
return networkStore.curNetwork;
});
return networkStore.curNetwork
})
let curNetworkInst = computed(() => {
return networkStore.networkInstances.find(n => n.instance_id == curNetwork.value.instance_id);
});
const curNetworkInst = computed(() => {
return networkStore.networkInstances.find(n => n.instance_id === curNetwork.value.instance_id)
})
let peerRouteInfos = computed(() => {
if (curNetworkInst.value) {
return curNetworkInst.value.detail.peer_route_pairs;
}
return [];
});
const peerRouteInfos = computed(() => {
if (curNetworkInst.value)
return curNetworkInst.value.detail?.peer_route_pairs || []
let routeCost = (info: any) => {
if (info.route) {
const cost = info.route.cost;
return cost == 1 ? "p2p" : `relay(${cost})`
}
return '?';
};
return []
})
function resolveObjPath(path: string, obj = self, separator = '.') {
var properties = Array.isArray(path) ? path : path.split(separator)
return properties.reduce((prev, curr) => prev?.[curr], obj)
function routeCost(info: any) {
if (info.route) {
const cost = info.route.cost
return cost === 1 ? 'p2p' : `relay(${cost})`
}
return '?'
}
let statsCommon = (info: any, field: string) => {
if (!info.peer) {
return undefined;
}
let conns = info.peer.conns;
return conns.reduce((acc: number, conn: any) => {
return acc + resolveObjPath(field, conn);
}, 0);
};
function resolveObjPath(path: string, obj = globalThis, separator = '.') {
const properties = Array.isArray(path) ? path : path.split(separator)
return properties.reduce((prev, curr) => prev?.[curr], obj)
}
function statsCommon(info: any, field: string): number | undefined {
if (!info.peer)
return undefined
const conns = info.peer.conns
return conns.reduce((acc: number, conn: any) => {
return acc + resolveObjPath(field, conn)
}, 0)
}
function humanFileSize(bytes: number, si = false, dp = 1) {
const thresh = si ? 1000 : 1024;
const thresh = si ? 1000 : 1024
if (Math.abs(bytes) < thresh) {
return bytes + ' B';
}
if (Math.abs(bytes) < thresh)
return `${bytes} B`
const units = si
? ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
: ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];
let u = -1;
const r = 10 ** dp;
const units = si
? ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
: ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
let u = -1
const r = 10 ** dp
do {
bytes /= thresh;
++u;
} while (Math.round(Math.abs(bytes) * r) / r >= thresh && u < units.length - 1);
do {
bytes /= thresh
++u
} while (Math.round(Math.abs(bytes) * r) / r >= thresh && u < units.length - 1)
return bytes.toFixed(dp) + ' ' + units[u];
return `${bytes.toFixed(dp)} ${units[u]}`
}
let latencyMs = (info: any) => {
let lat_us_sum = statsCommon(info, 'stats.latency_us');
return lat_us_sum ? `${lat_us_sum / 1000 / info.peer.conns.length}ms` : '';
};
let txBytes = (info: any) => {
let tx = statsCommon(info, 'stats.tx_bytes');
return tx ? humanFileSize(tx) : '';
function latencyMs(info: any) {
let lat_us_sum = statsCommon(info, 'stats.latency_us')
if (lat_us_sum === undefined)
return ''
lat_us_sum = lat_us_sum / 1000 / info.peer.conns.length
return `${lat_us_sum % 1 > 0 ? Math.round(lat_us_sum) + 1 : Math.round(lat_us_sum)}ms`
}
let rxBytes = (info: any) => {
let rx = statsCommon(info, 'stats.rx_bytes');
return rx ? humanFileSize(rx) : '';
function txBytes(info: any) {
const tx = statsCommon(info, 'stats.tx_bytes')
return tx ? humanFileSize(tx) : ''
}
let lossRate = (info: any) => {
let lossRate = statsCommon(info, 'loss_rate');
return lossRate != undefined ? `${Math.round(lossRate * 100)}%` : '';
function rxBytes(info: any) {
const rx = statsCommon(info, 'stats.rx_bytes')
return rx ? humanFileSize(rx) : ''
}
function lossRate(info: any) {
const lossRate = statsCommon(info, 'loss_rate')
return lossRate !== undefined ? `${Math.round(lossRate * 100)}%` : ''
}
const myNodeInfo = computed(() => {
if (!curNetworkInst.value) {
return {};
}
return curNetworkInst.value.detail?.my_node_info;
});
if (!curNetworkInst.value)
return {} as NodeInfo
return curNetworkInst.value.detail?.my_node_info
})
interface Chip {
label: string;
icon: string;
label: string
icon: string
}
let myNodeInfoChips = computed(() => {
if (!curNetworkInst.value) {
return [];
const myNodeInfoChips = computed(() => {
if (!curNetworkInst.value)
return []
const chips: Array<Chip> = []
const my_node_info = curNetworkInst.value.detail?.my_node_info
if (!my_node_info)
return chips
// virtual ipv4
chips.push({
label: `Virtual IPv4: ${my_node_info.virtual_ipv4}`,
icon: '',
} as Chip)
// local ipv4s
const local_ipv4s = my_node_info.ips?.interface_ipv4s
for (const [idx, ip] of local_ipv4s?.entries()) {
chips.push({
label: `Local IPv4 ${idx}: ${ip}`,
icon: '',
} as Chip)
}
// local ipv6s
const local_ipv6s = my_node_info.ips?.interface_ipv6s
for (const [idx, ip] of local_ipv6s?.entries()) {
chips.push({
label: `Local IPv6 ${idx}: ${ip}`,
icon: '',
} as Chip)
}
// public ip
const public_ip = my_node_info.ips?.public_ipv4
if (public_ip) {
chips.push({
label: `Public IP: ${public_ip}`,
icon: '',
} as Chip)
}
// listeners:
const listeners = my_node_info.listeners
for (const [idx, listener] of listeners?.entries()) {
chips.push({
label: `Listener ${idx}: ${listener}`,
icon: '',
} as Chip)
}
// udp nat type
enum NatType {
// has NAT; but own a single public IP, port is not changed
Unknown = 0,
OpenInternet = 1,
NoPAT = 2,
FullCone = 3,
Restricted = 4,
PortRestricted = 5,
Symmetric = 6,
SymUdpFirewall = 7,
};
const udpNatType: NatType = my_node_info.stun_info?.udp_nat_type
if (udpNatType !== undefined) {
const udpNatTypeStrMap = {
[NatType.Unknown]: 'Unknown',
[NatType.OpenInternet]: 'Open Internet',
[NatType.NoPAT]: 'No PAT',
[NatType.FullCone]: 'Full Cone',
[NatType.Restricted]: 'Restricted',
[NatType.PortRestricted]: 'Port Restricted',
[NatType.Symmetric]: 'Symmetric',
[NatType.SymUdpFirewall]: 'Symmetric UDP Firewall',
}
let chips: Array<Chip> = [];
let my_node_info = curNetworkInst.value.detail?.my_node_info;
if (!my_node_info) {
return chips;
}
chips.push({
label: `UDP NAT Type: ${udpNatTypeStrMap[udpNatType]}`,
icon: '',
} as Chip)
}
// local ipv4s
let local_ipv4s = my_node_info.ips?.interface_ipv4s;
for (let [idx, ip] of local_ipv4s?.entries()) {
chips.push({
label: `Local IPv4 ${idx}: ${ip}`,
icon: '',
} as Chip);
}
return chips
})
// local ipv6s
let local_ipv6s = my_node_info.ips?.interface_ipv6s;
for (let [idx, ip] of local_ipv6s?.entries()) {
chips.push({
label: `Local IPv6 ${idx}: ${ip}`,
icon: '',
} as Chip);
}
function globalSumCommon(field: string) {
let sum = 0
if (!peerRouteInfos.value)
return sum
// public ip
let public_ip = my_node_info.ips?.public_ipv4;
if (public_ip) {
chips.push({
label: `Public IP: ${public_ip}`,
icon: '',
} as Chip);
}
// listeners:
let listeners = my_node_info.listeners;
for (let [idx, listener] of listeners?.entries()) {
chips.push({
label: `Listener ${idx}: ${listener}`,
icon: '',
} as Chip);
}
// udp nat type
enum NatType {
// has NAT; but own a single public IP, port is not changed
Unknown = 0,
OpenInternet = 1,
NoPAT = 2,
FullCone = 3,
Restricted = 4,
PortRestricted = 5,
Symmetric = 6,
SymUdpFirewall = 7,
};
let udpNatType: NatType = my_node_info.stun_info?.udp_nat_type;
if (udpNatType != undefined) {
let udpNatTypeStrMap = {
[NatType.Unknown]: 'Unknown',
[NatType.OpenInternet]: 'Open Internet',
[NatType.NoPAT]: 'No PAT',
[NatType.FullCone]: 'Full Cone',
[NatType.Restricted]: 'Restricted',
[NatType.PortRestricted]: 'Port Restricted',
[NatType.Symmetric]: 'Symmetric',
[NatType.SymUdpFirewall]: 'Symmetric UDP Firewall',
};
chips.push({
label: `UDP NAT Type: ${udpNatTypeStrMap[udpNatType]}`,
icon: '',
} as Chip);
}
return chips;
});
const globalSumCommon = (field: string) => {
let sum = 0;
if (!peerRouteInfos.value) {
return sum;
}
for (let info of peerRouteInfos.value) {
let tx = statsCommon(info, field);
if (tx) {
sum += tx;
}
}
return sum;
};
const txGlobalSum = () => {
return globalSumCommon('stats.tx_bytes');
};
const rxGlobalSum = () => {
return globalSumCommon('stats.rx_bytes');
for (const info of peerRouteInfos.value) {
const tx = statsCommon(info, field)
if (tx)
sum += tx
}
return sum
}
function txGlobalSum() {
return globalSumCommon('stats.tx_bytes')
}
function rxGlobalSum() {
return globalSumCommon('stats.rx_bytes')
}
const peerCount = computed(() => {
if (!peerRouteInfos.value) {
return 0;
}
return peerRouteInfos.value.length;
});
if (!peerRouteInfos.value)
return 0
return peerRouteInfos.value.length
})
// calculate tx/rx rate every 2 seconds
let rateIntervalId = 0;
let rateInterval = 2000;
let prevTxSum = 0;
let prevRxSum = 0;
let txRate = ref('0');
let rxRate = ref('0');
let rateIntervalId = 0
const rateInterval = 2000
let prevTxSum = 0
let prevRxSum = 0
const txRate = ref('0')
const rxRate = ref('0')
onMounted(() => {
rateIntervalId = setInterval(() => {
let curTxSum = txGlobalSum();
txRate.value = humanFileSize((curTxSum - prevTxSum) / (rateInterval / 1000));
prevTxSum = curTxSum;
rateIntervalId = window.setInterval(() => {
const curTxSum = txGlobalSum()
txRate.value = humanFileSize((curTxSum - prevTxSum) / (rateInterval / 1000))
prevTxSum = curTxSum
let curRxSum = rxGlobalSum();
rxRate.value = humanFileSize((curRxSum - prevRxSum) / (rateInterval / 1000));
prevRxSum = curRxSum;
}, rateInterval);
});
const curRxSum = rxGlobalSum()
rxRate.value = humanFileSize((curRxSum - prevRxSum) / (rateInterval / 1000))
prevRxSum = curRxSum
}, rateInterval)
})
onUnmounted(() => {
clearInterval(rateIntervalId);
});
clearInterval(rateIntervalId)
})
const dialogVisible = ref(false);
const dialogContent = ref('');
const dialogVisible = ref(false)
const dialogContent = ref<any>('')
const dialogHeader = ref('event_log')
const showVpnPortalConfig = () => {
let my_node_info = myNodeInfo.value;
if (!my_node_info) {
return;
}
const url = "https://www.wireguardconfig.com/qrcode";
dialogContent.value = `${my_node_info.vpn_portal_cfg}\n\n # can generate QR code: ${url}`;
dialogVisible.value = true;
function showVpnPortalConfig() {
const my_node_info = myNodeInfo.value
if (!my_node_info)
return
const url = 'https://www.wireguardconfig.com/qrcode'
dialogContent.value = `${my_node_info.vpn_portal_cfg}\n\n # can generate QR code: ${url}`
dialogHeader.value = 'vpn_portal_config'
dialogVisible.value = true
}
const showEventLogs = () => {
let detail = curNetworkInst.value?.detail;
if (!detail) {
return;
}
dialogContent.value = detail.events;
dialogVisible.value = true;
}
function showEventLogs() {
const detail = curNetworkInst.value?.detail
if (!detail)
return
dialogContent.value = detail.events
dialogHeader.value = 'event_log'
dialogVisible.value = true
}
</script>
<template>
<div>
<Dialog v-model:visible="dialogVisible" modal header="Dialog" :style="{ width: '70%' }">
<Panel>
<ScrollPanel style="width: 100%; height: 400px">
<pre>{{ dialogContent }}</pre>
</ScrollPanel>
</Panel>
<Divider />
<div class="flex justify-content-end gap-2">
<Button type="button" label="Close" @click="dialogVisible = false"></Button>
<div>
<Dialog v-model:visible="dialogVisible" modal :header="t(dialogHeader)" :style="{ width: '70%' }">
<Panel>
<ScrollPanel style="width: 100%; height: 400px">
<pre>{{ dialogContent }}</pre>
</ScrollPanel>
</Panel>
<Divider />
<div class="flex justify-content-end gap-2">
<Button type="button" :label="t('close')" @click="dialogVisible = false" />
</div>
</Dialog>
<Card v-if="curNetworkInst?.error_msg">
<template #title>
Run Network Error
</template>
<template #content>
<div class="flex flex-column gap-y-5">
<div class="text-red-500">
{{ curNetworkInst.error_msg }}
</div>
</div>
</template>
</Card>
<template v-else>
<Card>
<template #title>
{{ t('my_node_info') }}
</template>
<template #content>
<div class="flex w-full flex-column gap-y-5">
<div class="m-0 flex flex-row justify-center gap-x-5">
<div
class="rounded-full w-32 h-32 flex flex-column align-items-center pt-4"
style="border: 1px solid green"
>
<div class="font-bold">
{{ t('peer_count') }}
</div>
<div class="text-5xl mt-1">
{{ peerCount }}
</div>
</div>
<div
class="rounded-full w-32 h-32 flex flex-column align-items-center pt-4"
style="border: 1px solid purple"
>
<div class="font-bold">
{{ t('upload') }}
</div>
<div class="text-xl mt-2">
{{ txRate }}/s
</div>
</div>
<div
class="rounded-full w-32 h-32 flex flex-column align-items-center pt-4"
style="border: 1px solid fuchsia"
>
<div class="font-bold">
{{ t('download') }}
</div>
<div class="text-xl mt-2">
{{ rxRate }}/s
</div>
</div>
</div>
</Dialog>
<Card v-if="curNetworkInst?.error_msg">
<template #title>Run Network Error</template>
<template #content>
<div class="flex flex-column gap-y-5">
<div class="text-red-500">
{{ curNetworkInst.error_msg }}
</div>
</div>
</template>
</Card>
<div class="flex flex-row align-items-center flex-wrap w-full">
<Chip
v-for="(chip, i) in myNodeInfoChips" :key="i" :label="chip.label" :icon="chip.icon"
class="mr-2 mt-2"
/>
</div>
<Card v-if="!curNetworkInst?.error_msg">
<template #title>{{ $t('my_node_info') }}</template>
<template #content>
<div class="flex w-full flex-column gap-y-5">
<div class="m-0 flex flex-row justify-center gap-x-5">
<div class="rounded-full w-36 h-36 flex flex-column align-items-center pt-4"
style="border: 1px solid green">
<div class="font-bold">
{{ $t('peer_count') }}
</div>
<div class="text-5xl mt-1">{{ peerCount }}</div>
</div>
<div v-if="myNodeInfo" class="m-0 flex flex-row justify-center gap-x-5 text-sm">
<Button severity="info" :label="t('show_vpn_portal_config')" @click="showVpnPortalConfig" />
<Button severity="info" :label="t('show_event_log')" @click="showEventLogs" />
</div>
</div>
</template>
</Card>
<div class="rounded-full w-36 h-36 flex flex-column align-items-center pt-4"
style="border: 1px solid purple">
<div class="font-bold">
{{ $t('upload') }}
</div>
<div class="text-xl mt-2">{{ txRate }}/s</div>
</div>
<Divider />
<div class="rounded-full w-36 h-36 flex flex-column align-items-center pt-4"
style="border: 1px solid fuchsia">
<div class="font-bold">
{{ $t('download') }}
</div>
<div class="text-xl mt-2">{{ rxRate }}/s</div>
</div>
</div>
<div class="flex flex-row align-items-center flex-wrap w-full">
<Chip v-for="chip in myNodeInfoChips" :label="chip.label" :icon="chip.icon" class="mr-2 mt-2">
</Chip>
</div>
<div class="m-0 flex flex-row justify-center gap-x-5 text-sm" v-if="myNodeInfo">
<Button severity="info" :label="$t('show_vpn_portal_config')" @click="showVpnPortalConfig" />
<Button severity="info" :label="$t('show_event_log')" @click="showEventLogs" />
</div>
</div>
</template>
</Card>
<Divider />
<Card v-if="!curNetworkInst?.error_msg">
<template #title>{{ $t('peer_info') }}</template>
<template #content>
<DataTable :value="peerRouteInfos" tableStyle="min-width: 50rem">
<Column field="route.ipv4_addr" :header="$t('virtual_ipv4')"></Column>
<Column field="route.hostname" :header="$t('hostname')"></Column>
<Column :field="routeCost" :header="$t('route_cost')"></Column>
<Column :field="latencyMs" :header="$t('latency')"></Column>
<Column :field="txBytes" :header="$t('upload_bytes')"></Column>
<Column :field="rxBytes" :header="$t('download_bytes')"></Column>
<Column :field="lossRate" :header="$t('loss_rate')"></Column>
</DataTable>
</template>
</Card>
</div>
</template>
<Card>
<template #title>
{{ t('peer_info') }}
</template>
<template #content>
<DataTable :value="peerRouteInfos" column-resize-mode="fit" table-style="width: 100%">
<Column field="route.ipv4_addr" style="width: 100px;" :header="t('virtual_ipv4')" />
<Column field="route.hostname" style="max-width: 250px;" :header="t('hostname')" />
<Column :field="routeCost" style="width: 100px;" :header="t('route_cost')" />
<Column :field="latencyMs" style="width: 80px;" :header="t('latency')" />
<Column :field="txBytes" style="width: 80px;" :header="t('upload_bytes')" />
<Column :field="rxBytes" style="width: 80px;" :header="t('download_bytes')" />
<Column :field="lossRate" style="width: 100px;" :header="t('loss_rate')" />
</DataTable>
</template>
</Card>
</template>
</div>
</template>

View File

@@ -0,0 +1,26 @@
import { invoke } from '@tauri-apps/api/tauri'
import type { NetworkConfig, NetworkInstanceRunningInfo } from '~/types/network'
/**
 * Ask the backend to validate `cfg` and render it as a TOML config string.
 * Rejects (with the backend's error text) when the config is invalid.
 */
export async function parseNetworkConfig(cfg: NetworkConfig) {
  const toml = await invoke<string>('parse_network_config', { cfg })
  return toml
}
/** Start a network instance in the backend from the given config. */
export async function runNetworkInstance(cfg: NetworkConfig) {
  return await invoke('run_network_instance', { cfg })
}
/**
 * Tell the backend which instances to keep running; any instance whose id
 * is not in `instanceIds` gets stopped.
 */
export async function retainNetworkInstance(instanceIds: string[]) {
  return await invoke('retain_network_instance', { instanceIds })
}
/** Fetch a snapshot of running info for every instance, keyed by instance id. */
export async function collectNetworkInfos() {
  return invoke<Record<string, NetworkInstanceRunningInfo>>('collect_network_infos')
}
/** Resolve the operating-system hostname via the backend. */
export async function getOsHostname() {
  const hostname = await invoke<string>('get_os_hostname')
  return hostname
}
/**
 * Enable/disable start-on-boot in the backend.
 * Resolves to the state the backend actually applied.
 */
export async function setAutoLaunchStatus(enable: boolean) {
  const applied = await invoke<boolean>('set_auto_launch_status', { enable })
  return applied
}

View File

@@ -0,0 +1,3 @@
<template>
  <!-- Root component: every page is rendered through the router outlet. -->
<RouterView />
</template>

View File

@@ -1,362 +1,49 @@
import "./styles.css";
import "primevue/resources/themes/aura-light-green/theme.css";
import "primeicons/primeicons.css";
import "primeflex/primeflex.css";
import { setupLayouts } from 'virtual:generated-layouts'
import { createRouter, createWebHistory } from 'vue-router/auto'
import { createPinia, defineStore } from 'pinia'
import PrimeVue from 'primevue/config'
import ToastService from 'primevue/toastservice'
import App from '~/App.vue'
import { createMemoryHistory, createRouter } from 'vue-router'
import '~/styles.css'
import 'primevue/resources/themes/aura-light-green/theme.css'
import 'primeicons/primeicons.css'
import 'primeflex/primeflex.css'
import { i18n, loadLanguageAsync } from '~/modules/i18n'
import { loadAutoLaunchStatusAsync, getAutoLaunchStatusAsync } from './modules/auto_launch'
import { createApp } from "vue";
import PrimeVue from 'primevue/config';
import App from "./App.vue";
import { invoke } from "@tauri-apps/api/tauri";
if (import.meta.env.PROD) {
document.addEventListener('keydown', (event) => {
if (
event.key === 'F5'
|| (event.ctrlKey && event.key === 'r')
|| (event.metaKey && event.key === 'r')
)
event.preventDefault()
})
import { v4 as uuidv4 } from 'uuid';
import ToastService from 'primevue/toastservice';
const pinia = createPinia()
export enum NetworkingMethod {
PublicServer = "PublicServer",
Manual = "Manual",
Standalone = "Standalone",
document.addEventListener('contextmenu', (event) => {
event.preventDefault()
})
}
export interface NetworkConfig {
instance_id: string,
async function main() {
await loadLanguageAsync(localStorage.getItem('lang') || 'en')
await loadAutoLaunchStatusAsync(getAutoLaunchStatusAsync())
virtual_ipv4: string
network_name: string
network_secret: string
const app = createApp(App)
networking_method: NetworkingMethod,
const router = createRouter({
history: createWebHistory(),
extendRoutes: routes => setupLayouts(routes),
})
public_server_url: string,
peer_urls: Array<string>,
proxy_cidrs: Array<string>,
enable_vpn_portal: boolean,
vpn_portal_listne_port: number,
vpn_portal_client_network_addr: string,
vpn_portal_client_network_len: number,
advanced_settings: boolean,
listener_urls: Array<string>,
rpc_port: number,
app.use(router)
app.use(createPinia())
app.use(i18n, { useScope: 'global' })
app.use(PrimeVue)
app.use(ToastService)
app.mount('#app')
}
function default_network(): NetworkConfig {
return {
instance_id: uuidv4(),
virtual_ipv4: "",
network_name: "default",
network_secret: "",
networking_method: NetworkingMethod.PublicServer,
public_server_url: "tcp://easytier.public.kkrainbow.top:11010",
peer_urls: [],
proxy_cidrs: [],
enable_vpn_portal: false,
vpn_portal_listne_port: 22022,
vpn_portal_client_network_addr: "",
vpn_portal_client_network_len: 24,
advanced_settings: false,
listener_urls: [
"tcp://0.0.0.0:11010",
"udp://0.0.0.0:11010",
"wg://0.0.0.0:11011",
],
rpc_port: 15888,
}
}
export interface NetworkInstance {
instance_id: string,
running: boolean,
error_msg: string,
detail: any,
}
export const useNetworkStore = defineStore('network', {
state: () => {
const networkList = [default_network()];
return {
// for initially empty lists
networkList: networkList as NetworkConfig[],
// for data that is not yet loaded
curNetwork: networkList[0],
// uuid -> instance
instances: {} as Record<string, NetworkInstance>,
networkInfos: {} as Record<string, any>,
}
},
getters: {
lastNetwork(): NetworkConfig {
return this.networkList[this.networkList.length - 1];
},
curNetworkId(): string {
return this.curNetwork.instance_id;
},
networkInstances(): Array<NetworkInstance> {
return Object.values(this.instances);
},
networkInstanceIds(): Array<string> {
return Object.keys(this.instances);
}
},
actions: {
addNewNetwork() {
this.networkList.push(default_network());
},
delCurNetwork() {
const curNetworkIdx = this.networkList.indexOf(this.curNetwork);
this.networkList.splice(curNetworkIdx, 1);
const nextCurNetworkIdx = Math.min(curNetworkIdx, this.networkList.length - 1);
this.curNetwork = this.networkList[nextCurNetworkIdx];
},
removeNetworkInstance(instanceId: string) {
delete this.instances[instanceId];
},
addNetworkInstance(instanceId: string) {
this.instances[instanceId] = {
instance_id: instanceId,
running: false,
error_msg: "",
detail: {},
};
},
updateWithNetworkInfos(networkInfos: Record<string, any>) {
this.networkInfos = networkInfos;
for (const [instanceId, info] of Object.entries(networkInfos)) {
if (this.instances[instanceId] === undefined) {
this.addNetworkInstance(instanceId);
}
this.instances[instanceId].running = info["running"];
this.instances[instanceId].error_msg = info["error_msg"];
this.instances[instanceId].detail = info;
}
},
loadFromLocalStorage() {
const networkList = JSON.parse(localStorage.getItem("networkList") || '[]');
let result = [];
for (const cfg of networkList) {
result.push({
...default_network(),
...cfg,
});
}
if (result.length === 0) {
result.push(default_network());
}
this.networkList = result;
this.curNetwork = this.networkList[0];
},
saveToLocalStroage() {
localStorage.setItem("networkList", JSON.stringify(this.networkList));
}
}
})
export async function parseNetworkConfig(cfg: NetworkConfig): Promise<string> {
const ret: string = await invoke("parse_network_config", { cfg: JSON.stringify(cfg) });
return ret;
}
export async function runNetworkInstance(cfg: NetworkConfig) {
const ret: string = await invoke("run_network_instance", { cfg: JSON.stringify(cfg) });
return ret;
}
export async function retainNetworkInstance(instanceIds: Array<string>) {
const ret: string = await invoke("retain_network_instance", { instanceIds: JSON.stringify(instanceIds) });
return ret;
}
export async function collectNetworkInfos() {
const ret: string = await invoke("collect_network_infos", {});
return JSON.parse(ret);
}
import { createI18n } from 'vue-i18n'
const messages = {
en: {
"network": "Network",
"networking_method": "Networking Method",
"public_server": "Public Server",
"manual": "Manual",
"standalone": "Standalone",
"virtual_ipv4": "Virtual IPv4",
"network_name": "Network Name",
"network_secret": "Network Secret",
"public_server_url": "Public Server URL",
"peer_urls": "Peer URLs",
"proxy_cidrs": "Subnet Proxy CIDRs",
"enable_vpn_portal": "Enable VPN Portal",
"vpn_portal_listen_port": "VPN Portal Listen Port",
"vpn_portal_client_network": "Client Sub Network",
"advanced_settings": "Advanced Settings",
"listener_urls": "Listener URLs",
"rpc_port": "RPC Port",
"config_network": "Config Network",
"running": "Running",
"error_msg": "Error Message",
"detail": "Detail",
"add_new_network": "Add New Network",
"del_cur_network": "Delete Current Network",
"select_network": "Select Network",
"network_instances": "Network Instances",
"instance_id": "Instance ID",
"network_infos": "Network Infos",
"parse_network_config": "Parse Network Config",
"retain_network_instance": "Retain Network Instance",
"collect_network_infos": "Collect Network Infos",
"settings": "Settings",
"exchange_language": "切换中文",
"exit": "Exit",
"chips_placeholder": "e.g: {0}, press Enter to add",
"off_text": "Press to disable",
"on_text": "Press to enable",
"show_config": "Show Config",
"close": "Close",
"my_node_info": "My Node Info",
"peer_count": "Connected",
"upload": "Upload",
"download": "Download",
"show_vpn_portal_config": "Show VPN Portal Config",
"show_event_log": "Show Event Log",
"peer_info": "Peer Info",
"route_cost": "Route Cost",
"hostname": "Hostname",
"latency": "Latency",
"upload_bytes": "Upload",
"download_bytes": "Download",
"loss_rate": "Loss Rate",
"run_network": "Run Network",
"stop_network": "Stop Network",
},
cn: {
"network": "网络",
"networking_method": "网络方式",
"public_server": "公共服务器",
"manual": "手动",
"standalone": "独立",
"virtual_ipv4": "虚拟IPv4地址",
"network_name": "网络名称",
"network_secret": "网络密码",
"public_server_url": "公共服务器地址",
"peer_urls": "对等节点地址",
"proxy_cidrs": "子网代理CIDR",
"enable_vpn_portal": "启用VPN门户",
"vpn_portal_listen_port": "监听端口",
"vpn_portal_client_network": "客户端子网",
"advanced_settings": "高级设置",
"listener_urls": "监听地址",
"rpc_port": "RPC端口",
"config_network": "配置网络",
"running": "运行中",
"error_msg": "错误信息",
"detail": "详情",
"add_new_network": "添加新网络",
"del_cur_network": "删除当前网络",
"select_network": "选择网络",
"network_instances": "网络实例",
"instance_id": "实例ID",
"network_infos": "网络信息",
"parse_network_config": "解析网络配置",
"retain_network_instance": "保留网络实例",
"collect_network_infos": "收集网络信息",
"settings": "设置",
"exchange_language": "Switch to English",
"exit": "退出",
"chips_placeholder": "例如: {0}, 按回车添加",
"off_text": "点击关闭",
"on_text": "点击开启",
"show_config": "显示配置",
"close": "关闭",
"my_node_info": "当前节点信息",
"peer_count": "已连接",
"upload": "上传",
"download": "下载",
"show_vpn_portal_config": "显示VPN门户配置",
"show_event_log": "显示事件日志",
"peer_info": "节点信息",
"hostname": "主机名",
"route_cost": "路由",
"latency": "延迟",
"upload_bytes": "上传",
"download_bytes": "下载",
"loss_rate": "丢包率",
"run_network": "运行网络",
"stop_network": "停止网络",
}
}
function saveLocaleToLocalStorage(locale: string) {
localStorage.setItem("locale", locale);
}
export function loadLocaleFromLocalStorage(): 'en' | 'cn' {
const v = localStorage.getItem("locale")
if (v === 'en' || v === 'cn') {
return v;
} else {
return 'en';
}
}
export const i18n = createI18n({
legacy: false,
locale: 'en', // set locale
fallbackLocale: 'cn', // set fallback locale
messages,
})
export function changeLocale(locale: 'en' | 'cn') {
i18n.global.locale.value = locale;
saveLocaleToLocalStorage(locale);
}
const app = createApp(App);
app.use(i18n, { useScope: 'global' })
app.use(pinia)
app.use(PrimeVue);
app.use(ToastService);
app.mount("#app");
export const router = createRouter({
history: createMemoryHistory(),
routes: [{ path: "/", component: App }]
});
main()

View File

@@ -0,0 +1,16 @@
import { setAutoLaunchStatus } from "~/composables/network"
/**
 * Push the desired auto-launch state to the backend and mirror the applied
 * state into localStorage under 'auto_launch'.
 *
 * Resolves to the state the backend applied, or `false` when the backend
 * call fails (the failure is logged, never thrown, and localStorage is
 * left untouched in that case).
 */
export async function loadAutoLaunchStatusAsync(enable: boolean): Promise<boolean> {
  let applied: boolean
  try {
    applied = await setAutoLaunchStatus(enable)
    localStorage.setItem('auto_launch', JSON.stringify(applied))
  }
  catch (err) {
    console.error(err)
    return false
  }
  return applied
}
/**
 * Read the last persisted auto-launch state from localStorage.
 * NOTE(review): despite the `Async` suffix this function is synchronous;
 * renaming would touch callers, so the name is kept as-is.
 */
export function getAutoLaunchStatusAsync(): boolean {
  const stored = localStorage.getItem('auto_launch')
  return stored === 'true'
}

View File

@@ -0,0 +1,50 @@
import type { Locale } from 'vue-i18n'
import { createI18n } from 'vue-i18n'
// Import i18n resources
// https://vitejs.dev/guide/features.html#glob-import
// Global i18n instance. Locale and messages start empty and are filled in
// lazily by loadLanguageAsync() below.
export const i18n = createI18n({
legacy: false,
locale: '',
fallbackLocale: '',
messages: {},
})
// Map of locale code -> lazy loader for its YAML bundle, built from the
// files under locales/ via Vite's glob import (e.g. "en.yml" -> "en").
const localesMap = Object.fromEntries(
Object.entries(import.meta.glob('../../locales/*.yml'))
.map(([path, loadLocale]) => [path.match(/([\w-]*)\.yml$/)?.[1], loadLocale]),
) as Record<Locale, () => Promise<{ default: Record<string, string> }>>
export const availableLocales = Object.keys(localesMap)
// Locale codes whose message bundles have already been registered on `i18n`.
const loadedLanguages: string[] = []
/**
 * Make `lang` the active locale on the global i18n instance, persist the
 * choice under the 'lang' key, and echo the locale back to the caller.
 */
function setI18nLanguage(lang: Locale) {
  localStorage.setItem('lang', lang)
  i18n.global.locale.value = lang as any
  return lang
}
/**
 * Lazily load the message bundle for `lang`, register it on the global i18n
 * instance and activate it. Already-active or already-loaded locales skip
 * the import and are just re-activated.
 *
 * NOTE(review): when the bundle import fails, the English bundle is loaded
 * instead but still registered (and activated) under the requested `lang`
 * key — presumably intentional fallback behavior; confirm.
 */
export async function loadLanguageAsync(lang: string): Promise<Locale> {
// If the same language
if (i18n.global.locale.value === lang)
return setI18nLanguage(lang)
// If the language was already loaded
if (loadedLanguages.includes(lang))
return setI18nLanguage(lang)
// If the language hasn't been loaded yet
let messages
try {
messages = await localesMap[lang]()
}
catch {
// Unknown locale or failed chunk load — fall back to the English bundle.
messages = await localesMap.en()
}
i18n.global.setLocaleMessage(lang, messages.default)
loadedLanguages.push(lang)
return setI18nLanguage(lang)
}

View File

@@ -0,0 +1,297 @@
<script setup lang="ts">
import Stepper from 'primevue/stepper'
import StepperPanel from 'primevue/stepperpanel'
import { useToast } from 'primevue/usetoast'
import { exit } from '@tauri-apps/api/process'
import Config from '~/components/Config.vue'
import Status from '~/components/Status.vue'
import type { NetworkConfig } from '~/types/network'
import { loadLanguageAsync } from '~/modules/i18n'
import { getAutoLaunchStatusAsync as getAutoLaunchStatus, loadAutoLaunchStatusAsync } from '~/modules/auto_launch'
import { loadRunningInstanceIdsFromLocalStorage } from '~/stores/network'
const { t, locale } = useI18n()
const visible = ref(false)
const tomlConfig = ref('')
// Bottom Menubar entries for the current network.
const items = ref([
{
// Render the current network config as TOML in a read-only dialog.
label: () => t('show_config'),
icon: 'pi pi-file-edit',
command: async () => {
try {
const ret = await parseNetworkConfig(networkStore.curNetwork)
tomlConfig.value = ret
}
catch (e: any) {
// Show the parse error text in the dialog instead of the config.
tomlConfig.value = e
}
visible.value = true
},
},
{
// Delete the selected network; disabled when it is the only one left.
label: () => t('del_cur_network'),
icon: 'pi pi-times',
command: async () => {
networkStore.removeNetworkInstance(networkStore.curNetwork.instance_id)
await retainNetworkInstance(networkStore.networkInstanceIds)
networkStore.delCurNetwork()
},
disabled: () => networkStore.networkList.length <= 1,
},
])
enum Severity {
None = 'none',
Success = 'success',
Info = 'info',
Warn = 'warn',
Error = 'error',
}
const messageBarSeverity = ref(Severity.None)
const messageBarContent = ref('')
const toast = useToast()
const networkStore = useNetworkStore()
// Append a fresh default network config and immediately select it in the UI.
function addNewNetwork() {
networkStore.addNewNetwork()
// lastNetwork is the entry just pushed by addNewNetwork().
networkStore.curNetwork = networkStore.lastNetwork
}
// On every store mutation: persist the config list and running ids, then
// ask the backend to re-parse the current config so validation errors are
// surfaced in the bottom message bar.
networkStore.$subscribe(async () => {
networkStore.saveToLocalStorage()
networkStore.saveRunningInstanceIdsToLocalStorage()
try {
await parseNetworkConfig(networkStore.curNetwork)
// Config parsed cleanly -- clear any previous error banner.
messageBarSeverity.value = Severity.None
}
catch (e: any) {
messageBarContent.value = e
messageBarSeverity.value = Severity.Error
}
})
// Start (or restart) the network instance for `cfg`.
// `cb` is the Stepper's nextCallback; it is invoked with a dummy event to
// advance the UI to the "running" panel before the instance is launched.
async function runNetworkCb(cfg: NetworkConfig, cb: (e: MouseEvent) => void) {
cb({} as MouseEvent)
// Drop any stale instance with the same id, sync the retained set to the
// backend, then register and launch a fresh instance.
networkStore.removeNetworkInstance(cfg.instance_id)
await retainNetworkInstance(networkStore.networkInstanceIds)
networkStore.addNetworkInstance(cfg.instance_id)
try {
await runNetworkInstance(cfg)
}
catch (e: any) {
// Surface backend startup failures as a toast instead of crashing the UI.
toast.add({ severity: 'info', detail: e })
}
}
// Stop the network instance for `cfg`. `cb` is the Stepper's prevCallback,
// invoked with a dummy event to move the UI back to the config panel.
async function stopNetworkCb(cfg: NetworkConfig, cb: (e: MouseEvent) => void) {
cb({} as MouseEvent)
// Removing the id locally and then retaining the remaining set on the
// backend shuts this instance down.
networkStore.removeNetworkInstance(cfg.instance_id)
await retainNetworkInstance(networkStore.networkInstanceIds)
}
// Poll the backend for per-instance running info and merge it into the store.
// Called every 500ms from the onMounted interval below.
async function updateNetworkInfos() {
networkStore.updateWithNetworkInfos(await collectNetworkInfos())
}
let intervalId = 0
onMounted(() => {
intervalId = window.setInterval(async () => {
await updateNetworkInfos()
}, 500)
})
onUnmounted(() => clearInterval(intervalId))
const activeStep = computed(() => {
return networkStore.networkInstanceIds.includes(networkStore.curNetworkId) ? 1 : 0
})
// Ref to the popup Menu component (needed to call .toggle()).
const setting_menu = ref()
// Settings popup entries: language toggle (en <-> cn), auto-launch toggle,
// and app exit.
const setting_menu_items = ref([
{
label: () => t('settings'),
items: [
{
label: () => t('exchange_language'),
icon: 'pi pi-language',
command: async () => {
await loadLanguageAsync((locale.value === 'en' ? 'cn' : 'en'))
},
},
{
// Label reflects the current persisted auto-launch state.
label: () => getAutoLaunchStatus() ? t('disable_auto_launch') : t('enable_auto_launch'),
icon: 'pi pi-desktop',
command: async () => {
await loadAutoLaunchStatusAsync(!getAutoLaunchStatus())
},
},
{
label: () => t('exit'),
icon: 'pi pi-power-off',
command: async () => {
// NOTE(review): exits the Tauri process with code 1 -- confirm the
// non-zero exit code is intentional.
await exit(1)
},
},
],
},
])
// Open/close the settings overlay menu anchored to the settings button.
function toggle_setting_menu(event: any) {
setting_menu.value.toggle(event)
}
// On startup: restore saved configs from localStorage, and -- only when
// auto-launch is enabled -- restart every instance that was running when
// the app last exited.
onMounted(async () => {
networkStore.loadFromLocalStorage()
if (getAutoLaunchStatus()) {
let prev_running_ids = loadRunningInstanceIdsFromLocalStorage()
for (let id of prev_running_ids) {
// Ids whose config was deleted in the meantime are silently skipped.
let cfg = networkStore.networkList.find((item) => item.instance_id === id)
if (cfg) {
networkStore.addNetworkInstance(cfg.instance_id)
await runNetworkInstance(cfg)
}
}
}
})
// True when the network config with this id currently has a live instance.
function isRunning(id: string) {
  const runningIds = networkStore.networkInstanceIds
  return runningIds.indexOf(id) !== -1
}
</script>
<script lang="ts">
</script>
<template>
<div id="root" class="flex flex-column">
<Dialog v-model:visible="visible" modal header="Config File" :style="{ width: '70%' }">
<Panel>
<ScrollPanel style="width: 100%; height: 300px">
<pre>{{ tomlConfig }}</pre>
</ScrollPanel>
</Panel>
<Divider />
<div class="flex justify-content-end gap-2">
<Button type="button" :label="t('close')" @click="visible = false" />
</div>
</Dialog>
<div>
<Toolbar>
<template #start>
<div class="flex align-items-center gap-2">
<Button icon="pi pi-plus" class="mr-2" severity="primary" :label="t('add_new_network')"
@click="addNewNetwork" />
</div>
</template>
<template #center>
<div class="min-w-80 mr-20">
<Dropdown v-model="networkStore.curNetwork" :options="networkStore.networkList" :highlight-on-select="false"
:placeholder="t('select_network')" class="w-full">
<template #value="slotProps">
<div class="flex items-start content-center">
<div class="mr-3">
<span>{{ slotProps.value.network_name }}</span>
<span
v-if="isRunning(slotProps.value.instance_id) && networkStore.instances[slotProps.value.instance_id].detail && (networkStore.instances[slotProps.value.instance_id].detail?.my_node_info.virtual_ipv4 !== '')"
class="ml-3">
{{ networkStore.instances[slotProps.value.instance_id].detail
? networkStore.instances[slotProps.value.instance_id].detail?.my_node_info.virtual_ipv4 : '' }}
</span>
</div>
<Tag class="my-auto" :severity="isRunning(slotProps.value.instance_id) ? 'success' : 'info'"
:value="t(isRunning(slotProps.value.instance_id) ? 'network_running' : 'network_stopped')" />
</div>
</template>
<template #option="slotProps">
<div class="flex flex-col items-start content-center">
<div class="flex">
<div class="mr-3">
{{ t('network_name') }}: {{ slotProps.option.network_name }}
</div>
<Tag class="my-auto" :severity="isRunning(slotProps.option.instance_id) ? 'success' : 'info'"
:value="t(isRunning(slotProps.option.instance_id) ? 'network_running' : 'network_stopped')" />
</div>
<div>{{ slotProps.option.public_server_url }}</div>
</div>
</template>
</Dropdown>
</div>
</template>
<template #end>
<Button icon="pi pi-cog" class="mr-2" severity="secondary" aria-haspopup="true" :label="t('settings')"
aria-controls="overlay_setting_menu" @click="toggle_setting_menu" />
<Menu id="overlay_setting_menu" ref="setting_menu" :model="setting_menu_items" :popup="true" />
</template>
</Toolbar>
</div>
<Stepper class="h-full overflow-y-auto" :active-step="activeStep">
<StepperPanel :header="t('config_network')">
<template #content="{ nextCallback }">
<Config :instance-id="networkStore.curNetworkId" :config-invalid="messageBarSeverity !== Severity.None"
@run-network="runNetworkCb($event, nextCallback)" />
</template>
</StepperPanel>
<StepperPanel :header="t('running')">
<template #content="{ prevCallback }">
<div class="flex flex-column">
<Status :instance-id="networkStore.curNetworkId" />
</div>
<div class="flex pt-4 justify-content-center">
<Button :label="t('stop_network')" severity="danger" icon="pi pi-arrow-left"
@click="stopNetworkCb(networkStore.curNetwork, prevCallback)" />
</div>
</template>
</StepperPanel>
</Stepper>
<div>
<Menubar :model="items" breakpoint="300px" />
<InlineMessage v-if="messageBarSeverity !== Severity.None" class="absolute bottom-0 right-0" severity="error">
{{ messageBarContent }}
</InlineMessage>
</div>
</div>
</template>
<style scoped lang="postcss">
#root {
height: 100vh;
width: 100vw;
}
.p-dropdown :deep(.p-dropdown-panel .p-dropdown-items .p-dropdown-item) {
padding: 0 0.5rem;
}
</style>
<style>
body {
height: 100vh;
width: 100vw;
padding: 0;
margin: 0;
overflow: hidden;
}
.p-menubar .p-menuitem {
margin: 0;
}
/*
.p-tabview-panel {
height: 100%;
} */
</style>

View File

@@ -0,0 +1,114 @@
import type { NetworkConfig, NetworkInstance, NetworkInstanceRunningInfo } from '~/types/network'
import { DEFAULT_NETWORK_CONFIG } from '~/types/network'
// Pinia store holding all network configs plus bookkeeping for the
// instances currently running in the backend.
export const useNetworkStore = defineStore('networkStore', {
  state: () => {
    // Always keep at least one (default) config so `curNetwork` is never undefined.
    const networkList = [DEFAULT_NETWORK_CONFIG()]
    return {
      // All known network configurations.
      networkList: networkList as NetworkConfig[],
      // The config currently selected in the UI.
      curNetwork: networkList[0],
      // instance uuid -> local instance bookkeeping
      instances: {} as Record<string, NetworkInstance>,
      // instance uuid -> latest running info reported by the backend
      networkInfos: {} as Record<string, NetworkInstanceRunningInfo>,
    }
  },

  getters: {
    // The most recently added config (used right after addNewNetwork()).
    lastNetwork(): NetworkConfig {
      return this.networkList[this.networkList.length - 1]
    },
    curNetworkId(): string {
      return this.curNetwork.instance_id
    },
    networkInstances(): Array<NetworkInstance> {
      return Object.values(this.instances)
    },
    networkInstanceIds(): Array<string> {
      return Object.keys(this.instances)
    },
  },

  actions: {
    addNewNetwork() {
      this.networkList.push(DEFAULT_NETWORK_CONFIG())
    },

    // Remove the currently selected config and select its nearest neighbor.
    delCurNetwork() {
      const curNetworkIdx = this.networkList.indexOf(this.curNetwork)
      this.networkList.splice(curNetworkIdx, 1)
      const nextCurNetworkIdx = Math.min(curNetworkIdx, this.networkList.length - 1)
      this.curNetwork = this.networkList[nextCurNetworkIdx]
    },

    removeNetworkInstance(instanceId: string) {
      delete this.instances[instanceId]
    },

    // Register a not-yet-running instance placeholder for this id.
    addNetworkInstance(instanceId: string) {
      this.instances[instanceId] = {
        instance_id: instanceId,
        running: false,
        error_msg: '',
        detail: undefined,
      }
    },

    // Merge running info reported by the backend into `instances`, creating
    // entries for ids we did not know about, then persist the running set.
    updateWithNetworkInfos(networkInfos: Record<string, NetworkInstanceRunningInfo>) {
      this.networkInfos = networkInfos
      for (const [instanceId, info] of Object.entries(networkInfos)) {
        if (this.instances[instanceId] === undefined)
          this.addNetworkInstance(instanceId)
        this.instances[instanceId].running = info.running
        this.instances[instanceId].error_msg = info.error_msg || ''
        this.instances[instanceId].detail = info
      }
      this.saveRunningInstanceIdsToLocalStorage()
    },

    loadFromLocalStorage() {
      let networkList: NetworkConfig[]
      // A corrupted localStorage entry must not break startup; fall back to
      // an empty list (consistent with loadRunningInstanceIdsFromLocalStorage).
      try {
        networkList = JSON.parse(localStorage.getItem('networkList') || '[]')
      }
      catch (e) {
        console.error(e)
        networkList = []
      }
      // Backfill fields missing from configs saved by older versions
      // (e.g. a stored `[{}]` would otherwise have an undefined instance_id).
      networkList = networkList.map((cfg) => {
        return { ...DEFAULT_NETWORK_CONFIG(), ...cfg } as NetworkConfig
      })
      // Never allow an empty list; there must always be a current network.
      if (networkList.length === 0)
        networkList = [DEFAULT_NETWORK_CONFIG()]
      this.networkList = networkList
      this.curNetwork = this.networkList[0]
    },

    saveToLocalStorage() {
      localStorage.setItem('networkList', JSON.stringify(this.networkList))
    },

    // Persist the ids of currently-running instances so they can be
    // restarted after an app relaunch (see loadRunningInstanceIdsFromLocalStorage).
    saveRunningInstanceIdsToLocalStorage() {
      const instance_ids = Object.keys(this.instances).filter(
        (instanceId) => this.instances[instanceId].running,
      )
      localStorage.setItem('runningInstanceIds', JSON.stringify(instance_ids))
    },
  },
})
if (import.meta.hot)
import.meta.hot.accept(acceptHMRUpdate(useNetworkStore as any, import.meta.hot))
/**
 * Read back the instance ids persisted by saveRunningInstanceIdsToLocalStorage.
 * Returns an empty list when the entry is absent or not valid JSON.
 */
export function loadRunningInstanceIdsFromLocalStorage(): string[] {
  const raw = localStorage.getItem('runningInstanceIds') || '[]'
  try {
    return JSON.parse(raw)
  }
  catch (err) {
    console.error(err)
    return []
  }
}

View File

@@ -1,12 +1,12 @@
@layer tailwind-base, primevue, tailwind-utilities;
@layer tailwind-base {
@tailwind base;
@tailwind base;
}
@layer tailwind-utilities {
@tailwind components;
@tailwind utilities;
@tailwind components;
@tailwind utilities;
}
:root {
@@ -26,8 +26,23 @@
}
.card {
background: var(--surface-card);
padding: 2rem;
border-radius: 10px;
margin-bottom: 1rem;
}
background: var(--surface-card);
padding: 2rem;
border-radius: 10px;
margin-bottom: 1rem;
}
::-webkit-scrollbar {
width: 4px;
height: 4px;
border-radius: 4px;
}
::-webkit-scrollbar-track {
border-radius: 4px;
}
::-webkit-scrollbar-thumb {
border-radius: 4px;
background-color: #0000005d;
}

23
easytier-gui/src/typed-router.d.ts vendored Normal file
View File

@@ -0,0 +1,23 @@
/* eslint-disable */
/* prettier-ignore */
// @ts-nocheck
// Generated by unplugin-vue-router. ‼️ DO NOT MODIFY THIS FILE ‼️
// It's recommended to commit this file.
// Make sure to add this file to your tsconfig.json file as an "includes" or "files" entry.
declare module 'vue-router/auto-routes' {
import type {
RouteRecordInfo,
ParamValue,
ParamValueOneOrMore,
ParamValueZeroOrMore,
ParamValueZeroOrOne,
} from 'unplugin-vue-router/types'
/**
* Route name map generated by unplugin-vue-router
*/
export interface RouteNamedMap {
'/': RouteRecordInfo<'/', '/', Record<never, never>, Record<never, never>>,
}
}

View File

@@ -0,0 +1,162 @@
import { v4 as uuidv4 } from 'uuid'
// How this node discovers peers; string values are what the backend expects.
export enum NetworkingMethod {
PublicServer = 'PublicServer',
Manual = 'Manual',
Standalone = 'Standalone',
}
// One network's configuration as edited in the GUI, serialized to
// localStorage and handed to the backend. Field names use snake_case to
// match the backend's expected schema. See DEFAULT_NETWORK_CONFIG for defaults.
export interface NetworkConfig {
instance_id: string
dhcp: boolean
virtual_ipv4: string
hostname?: string
network_name: string
network_secret: string
networking_method: NetworkingMethod
public_server_url: string
peer_urls: string[]
proxy_cidrs: string[]
enable_vpn_portal: boolean
vpn_portal_listen_port: number
vpn_portal_client_network_addr: string
vpn_portal_client_network_len: number
advanced_settings: boolean
listener_urls: string[]
rpc_port: number
}
// Factory for a fresh network config. A function (not a constant) so each
// call yields a new random instance_id and independent arrays.
export function DEFAULT_NETWORK_CONFIG(): NetworkConfig {
return {
instance_id: uuidv4(),
dhcp: false,
virtual_ipv4: '',
network_name: 'easytier',
network_secret: '',
networking_method: NetworkingMethod.PublicServer,
public_server_url: 'tcp://easytier.public.kkrainbow.top:11010',
peer_urls: [],
proxy_cidrs: [],
enable_vpn_portal: false,
vpn_portal_listen_port: 22022,
vpn_portal_client_network_addr: '',
vpn_portal_client_network_len: 24,
advanced_settings: false,
listener_urls: [
'tcp://0.0.0.0:11010',
'udp://0.0.0.0:11010',
'wg://0.0.0.0:11011',
],
// NOTE(review): presumably 0 means "let the backend pick a port" -- confirm.
rpc_port: 0,
}
}
// Local bookkeeping for one backend network instance.
export interface NetworkInstance {
instance_id: string
running: boolean
error_msg: string
detail?: NetworkInstanceRunningInfo
}
// Snapshot of a running instance as reported by the backend poll.
export interface NetworkInstanceRunningInfo {
my_node_info: NodeInfo
events: Record<string, any>
node_info: NodeInfo
routes: Route[]
peers: PeerInfo[]
peer_route_pairs: PeerRoutePair[]
running: boolean
error_msg?: string
}
// This node's identity/addressing info. The nested `listeners` entries
// mirror a parsed-URL structure from the backend serializer -- field
// semantics are backend-defined, not interpreted here.
export interface NodeInfo {
virtual_ipv4: string
ips: {
public_ipv4: string
interface_ipv4s: string[]
public_ipv6: string
interface_ipv6s: string[]
listeners: {
serialization: string
scheme_end: number
username_end: number
host_start: number
host_end: number
host: any
port?: number
path_start: number
query_start?: number
fragment_start?: number
}[]
}
stun_info: StunInfo
listeners: string[]
vpn_portal_cfg?: string
}
// NAT detection results (numeric NatType codes from the backend proto).
export interface StunInfo {
udp_nat_type: number
tcp_nat_type: number
last_update_time: number
}
// One routing-table entry toward a peer.
export interface Route {
peer_id: number
ipv4_addr: string
next_hop_peer_id: number
cost: number
proxy_cidrs: string[]
hostname: string
stun_info?: StunInfo
inst_id: string
}
export interface PeerInfo {
peer_id: number
conns: PeerConnInfo[]
}
// One live connection to a peer.
export interface PeerConnInfo {
conn_id: string
my_peer_id: number
peer_id: number
features: string[]
tunnel?: TunnelInfo
stats?: PeerConnStats
loss_rate: number
}
// A route paired with the peer it resolves to (peer may be unknown yet).
export interface PeerRoutePair {
route: Route
peer?: PeerInfo
}
export interface TunnelInfo {
tunnel_type: string
local_addr: string
remote_addr: string
}
// Cumulative traffic counters for one peer connection.
export interface PeerConnStats {
rx_bytes: number
tx_bytes: number
rx_packets: number
tx_packets: number
latency_us: number
}

View File

@@ -1,7 +1,8 @@
/// <reference types="vite/client" />
declare module "*.vue" {
import type { DefineComponent } from "vue";
const component: DefineComponent<{}, {}, any>;
export default component;
declare module '*.vue' {
import type { DefineComponent } from 'vue'
const component: DefineComponent<object, object, any>
export default component
}

View File

@@ -1,12 +1,11 @@
/** @type {import('tailwindcss').Config} */
export default {
content: [
"./index.html",
"./src/**/*.{vue,js,ts,jsx,tsx}",
'./index.html',
'./src/**/*.{vue,js,ts,jsx,tsx}',
],
theme: {
extend: {},
},
plugins: [],
}

View File

@@ -1,25 +1,45 @@
{
"compilerOptions": {
"target": "ES2020",
"useDefineForClassFields": true,
"module": "ESNext",
"lib": ["ES2020", "DOM", "DOM.Iterable"],
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"target": "ESNext",
"jsx": "preserve",
/* Linting */
"lib": [
"DOM",
"ESNext"
],
"baseUrl": ".",
"module": "ESNext",
"moduleResolution": "bundler",
"paths": {
"~/*": [
"src/*"
]
},
"resolveJsonModule": true,
"types": [
"vite/client",
"vite-plugin-vue-layouts/client",
"unplugin-vue-macros/macros-global",
"unplugin-vue-router/client"
],
"allowImportingTsExtensions": true,
"allowJs": true,
"strict": true,
"strictNullChecks": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true
"noEmit": true,
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"isolatedModules": true,
"skipLibCheck": true
},
"include": ["src/**/*.ts", "src/**/*.d.ts", "src/**/*.tsx", "src/**/*.vue"],
"references": [{ "path": "./tsconfig.node.json" }]
"vueCompilerOptions": {
"plugins": [
"@vue-macros/volar/define-models",
"@vue-macros/volar/define-slots"
]
},
"exclude": [
"dist",
"node_modules"
]
}

View File

@@ -1,10 +0,0 @@
{
"compilerOptions": {
"composite": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"allowSyntheticDefaultImports": true
},
"include": ["vite.config.ts"]
}

View File

@@ -1,19 +1,84 @@
import { defineConfig } from "vite";
import vue from "@vitejs/plugin-vue";
import Components from 'unplugin-vue-components/vite';
import { PrimeVueResolver } from 'unplugin-vue-components/resolvers';
import path from 'node:path'
import { defineConfig } from 'vite'
import Vue from '@vitejs/plugin-vue'
import Layouts from 'vite-plugin-vue-layouts'
import Components from 'unplugin-vue-components/vite'
import AutoImport from 'unplugin-auto-import/vite'
import VueMacros from 'unplugin-vue-macros/vite'
import VueI18n from '@intlify/unplugin-vue-i18n/vite'
import VueDevTools from 'vite-plugin-vue-devtools'
import VueRouter from 'unplugin-vue-router/vite'
import { VueRouterAutoImports } from 'unplugin-vue-router'
import { PrimeVueResolver } from 'unplugin-vue-components/resolvers'
// https://vitejs.dev/config/
export default defineConfig(async () => ({
resolve: {
alias: {
'~/': `${path.resolve(__dirname, 'src')}/`,
},
},
plugins: [
vue(),
VueMacros({
plugins: {
vue: Vue({
include: [/\.vue$/, /\.md$/],
}),
},
}),
// https://github.com/posva/unplugin-vue-router
VueRouter({
extensions: ['.vue', '.md'],
dts: 'src/typed-router.d.ts',
}),
// https://github.com/JohnCampionJr/vite-plugin-vue-layouts
Layouts(),
// https://github.com/antfu/unplugin-auto-import
AutoImport({
imports: [
'vue',
'vue-i18n',
'pinia',
VueRouterAutoImports,
{
// add any other imports you were relying on
'vue-router/auto': ['useLink'],
},
],
dts: 'src/auto-imports.d.ts',
dirs: [
'src/composables',
'src/stores',
],
vueTemplate: true,
}),
// https://github.com/antfu/unplugin-vue-components
Components({
dts: true,
// allow auto load markdown components under `./src/components/`
extensions: ['vue', 'md'],
// allow auto import and register components used in markdown
include: [/\.vue$/, /\.vue\?vue/, /\.md$/],
dts: 'src/components.d.ts',
resolvers: [
PrimeVueResolver()
]
})],
PrimeVueResolver(),
],
}),
// https://github.com/intlify/bundle-tools/tree/main/packages/unplugin-vue-i18n
VueI18n({
runtimeOnly: true,
compositionOnly: true,
fullInstall: true,
include: [path.resolve(__dirname, 'locales/**')],
}),
// https://github.com/webfansplz/vite-plugin-vue-devtools
VueDevTools(),
],
// Vite options tailored for Tauri development and only applied in `tauri dev` or `tauri build`
//
@@ -25,7 +90,7 @@ export default defineConfig(async () => ({
strictPort: true,
watch: {
// 3. tell vite to ignore watching `src-tauri`
ignored: ["**/src-tauri/**"],
ignored: ['**/src-tauri/**'],
},
},
}));
}))

File diff suppressed because it is too large Load Diff

View File

@@ -3,7 +3,7 @@ name = "easytier"
description = "A full meshed p2p VPN, connecting all your devices in one network with one command."
homepage = "https://github.com/KKRainbow/EasyTier"
repository = "https://github.com/KKRainbow/EasyTier"
version = "0.1.2"
version = "1.1.0"
edition = "2021"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
@@ -42,7 +42,7 @@ auto_impl = "1.1.0"
crossbeam = "0.8.4"
time = "0.3"
toml = "0.8.12"
chrono = "0.4.35"
chrono = { version = "0.4.37", features = ["serde"] }
gethostname = "0.4.3"
@@ -65,12 +65,27 @@ pin-project-lite = "0.2.13"
atomicbox = "0.4.0"
tachyonix = "0.2.1"
quinn = { version = "0.10.2" }
rustls = { version = "0.21.0", features = ["dangerous_configuration"] }
rcgen = "0.11.1"
quinn = { version = "0.11.0", optional = true, features = ["ring"] }
rustls = { version = "0.23.0", features = [
"ring",
], default-features = false, optional = true }
rcgen = { version = "0.11.1", optional = true }
# for websocket
tokio-websockets = { version = "0.8.2", optional = true, features = [
"rustls-webpki-roots",
"client",
"server",
"fastrand",
"ring",
] }
http = { version = "1", default-features = false, features = [
"std",
], optional = true }
tokio-rustls = { version = "0.26", default-features = false, optional = true }
# for tap device
tun = { version = "0.6.1", features = ["async"] }
tun = { package = "tun-easytier", version = "0.6.1", features = ["async"] }
# for net ns
nix = { version = "0.27", features = ["sched", "socket", "ioctl"] }
@@ -86,13 +101,6 @@ crossbeam-queue = "0.3"
once_cell = "1.18.0"
# for packet
rkyv = { "version" = "0.7.42", features = [
"validation",
"archive_le",
"strict",
"copy_unsafe",
"arbitrary_enum_discriminant",
] }
postcard = { "version" = "1.0.8", features = ["alloc"] }
# for rpc
@@ -118,7 +126,6 @@ rand = "0.8.5"
serde = { version = "1.0", features = ["derive"] }
pnet = { version = "0.34.0", features = ["serde"] }
public-ip = { version = "0.2", features = ["default"] }
clap = { version = "4.4.8", features = ["unicode", "derive", "wrap_help"] }
@@ -127,12 +134,12 @@ async-recursion = "1.0.5"
network-interface = "1.1.1"
# for ospf route
pathfinding = "4.9.1"
petgraph = "0.6.5"
# for encryption
boringtun = { version = "0.6.0" }
ring = { version = "0.16" }
boringtun = { package = "boringtun-easytier", version = "*", optional = true } # for encryption
ring = { version = "0.17", optional = true }
bitflags = "2.5"
aes-gcm = { version = "0.10.3", optional = true }
# for cli
tabled = "0.15.*"
@@ -142,7 +149,11 @@ base64 = "0.21.7"
derivative = "2.2.0"
mimalloc-rust = "0.2.1"
mimalloc-rust = { version = "0.2.1", optional = true }
indexmap = { version = "~1.9.3", optional = false, features = ["std"] }
atomic-shim = "0.2.0"
[target.'cfg(windows)'.dependencies]
windows-sys = { version = "0.52", features = [
@@ -151,6 +162,7 @@ windows-sys = { version = "0.52", features = [
"Win32_Foundation",
"Win32_System_IO",
] }
encoding = "0.2"
[build-dependencies]
tonic-build = "0.10"
@@ -164,3 +176,20 @@ zip = "0.6.6"
serial_test = "3.0.0"
rstest = "0.18.2"
defguard_wireguard_rs = "0.4.2"
[features]
default = ["wireguard", "mimalloc", "websocket"]
full = ["quic", "websocket", "wireguard", "mimalloc", "aes-gcm"]
mips = ["aes-gcm", "mimalloc", "wireguard"]
wireguard = ["dep:boringtun", "dep:ring"]
quic = ["dep:quinn", "dep:rustls", "dep:rcgen"]
mimalloc = ["dep:mimalloc-rust"]
aes-gcm = ["dep:aes-gcm"]
websocket = [
"dep:tokio-websockets",
"dep:http",
"dep:tokio-rustls",
"dep:rustls",
"dep:rcgen",
]

View File

@@ -19,8 +19,8 @@ impl WindowsBuild {
let path = env::var_os("PATH").unwrap_or_default();
for p in env::split_paths(&path) {
let p = p.join("protoc");
if p.exists() {
let p = p.join("protoc.exe");
if p.exists() && p.is_file() {
return Some(p);
}
}

View File

@@ -59,6 +59,9 @@ message StunInfo {
NatType udp_nat_type = 1;
NatType tcp_nat_type = 2;
int64 last_update_time = 3;
repeated string public_ip = 4;
uint32 min_port = 5;
uint32 max_port = 6;
}
message Route {
@@ -117,16 +120,8 @@ service ConnectorManageRpc {
rpc ManageConnector (ManageConnectorRequest) returns (ManageConnectorResponse);
}
enum LatencyLevel {
VeryLow = 0;
Low = 1;
Normal = 2;
High = 3;
VeryHigh = 4;
}
message DirectConnectedPeerInfo {
LatencyLevel latency_level = 2;
int32 latency_ms = 1;
}
message PeerInfoForGlobalMap {
@@ -174,4 +169,7 @@ message TaRpcPacket {
uint32 transact_id = 4;
bool is_req = 5;
bytes content = 6;
uint32 total_pieces = 7;
uint32 piece_idx = 8;
}

View File

@@ -1,5 +1,6 @@
use std::{
net::SocketAddr,
net::{Ipv4Addr, SocketAddr},
path::PathBuf,
sync::{Arc, Mutex},
};
@@ -13,6 +14,9 @@ pub trait ConfigLoader: Send + Sync {
fn get_id(&self) -> uuid::Uuid;
fn set_id(&self, id: uuid::Uuid);
fn get_hostname(&self) -> String;
fn set_hostname(&self, name: Option<String>);
fn get_inst_name(&self) -> String;
fn set_inst_name(&self, name: String);
@@ -20,7 +24,10 @@ pub trait ConfigLoader: Send + Sync {
fn set_netns(&self, ns: Option<String>);
fn get_ipv4(&self) -> Option<std::net::Ipv4Addr>;
fn set_ipv4(&self, addr: std::net::Ipv4Addr);
fn set_ipv4(&self, addr: Option<std::net::Ipv4Addr>);
fn get_dhcp(&self) -> bool;
fn set_dhcp(&self, dhcp: bool);
fn add_proxy_cidr(&self, cidr: cidr::IpCidr);
fn remove_proxy_cidr(&self, cidr: cidr::IpCidr);
@@ -51,6 +58,9 @@ pub trait ConfigLoader: Send + Sync {
fn get_flags(&self) -> Flags;
fn set_flags(&self, flags: Flags);
fn get_exit_nodes(&self) -> Vec<Ipv4Addr>;
fn set_exit_nodes(&self, nodes: Vec<Ipv4Addr>);
fn dump(&self) -> String;
}
@@ -144,16 +154,25 @@ pub struct Flags {
pub enable_encryption: bool,
#[derivative(Default(value = "true"))]
pub enable_ipv6: bool,
#[derivative(Default(value = "1380"))]
pub mtu: u16,
#[derivative(Default(value = "true"))]
pub latency_first: bool,
#[derivative(Default(value = "false"))]
pub enable_exit_node: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
struct Config {
netns: Option<String>,
hostname: Option<String>,
instance_name: Option<String>,
instance_id: Option<uuid::Uuid>,
ipv4: Option<String>,
dhcp: Option<bool>,
network_identity: Option<NetworkIdentity>,
listeners: Option<Vec<url::Url>>,
exit_nodes: Option<Vec<Ipv4Addr>>,
peer: Option<Vec<PeerConfig>>,
proxy_network: Option<Vec<NetworkConfig>>,
@@ -187,15 +206,23 @@ impl TomlConfigLoader {
config_str, config_str
)
})?;
Ok(TomlConfigLoader {
config: Arc::new(Mutex::new(config)),
})
}
pub fn new(config_path: &str) -> Result<Self, anyhow::Error> {
pub fn new(config_path: &PathBuf) -> Result<Self, anyhow::Error> {
let config_str = std::fs::read_to_string(config_path)
.with_context(|| format!("failed to read config file: {}", config_path))?;
Self::new_from_str(&config_str)
.with_context(|| format!("failed to read config file: {:?}", config_path))?;
let ret = Self::new_from_str(&config_str)?;
let old_ns = ret.get_network_identity();
ret.set_network_identity(NetworkIdentity::new(
old_ns.network_name,
old_ns.network_secret.unwrap_or_default(),
));
Ok(ret)
}
}
@@ -213,6 +240,39 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().instance_name = Some(name);
}
/// Returns the configured hostname, sanitized to at most 32 ASCII
/// alphanumeric / '-' / '_' characters; falls back to the OS hostname
/// when the field is unset or empty.
fn get_hostname(&self) -> String {
    let hostname = self.config.lock().unwrap().hostname.clone();
    match hostname {
        Some(hostname) if !hostname.is_empty() => {
            // Keep only safe characters and cap the length. Every retained
            // char is ASCII (1 byte), so 32 chars is also <= 32 bytes — the
            // old second byte-length truncation was dead code and is removed.
            let name: String = hostname
                .chars()
                .filter(|c| c.is_ascii_alphanumeric() || *c == '-' || *c == '_')
                .take(32)
                .collect();
            // Persist the sanitized value so later reads are stable.
            if hostname != name {
                self.set_hostname(Some(name.clone()));
            }
            name
        }
        Some(_) => {
            // Empty string: clear the field and report the OS hostname.
            self.set_hostname(None);
            gethostname::gethostname().to_string_lossy().to_string()
        }
        None => gethostname::gethostname().to_string_lossy().to_string(),
    }
}
// Store (or clear, with None) the hostname field; no sanitization here —
// get_hostname sanitizes on read.
fn set_hostname(&self, name: Option<String>) {
self.config.lock().unwrap().hostname = name;
}
fn get_netns(&self) -> Option<String> {
self.config.lock().unwrap().netns.clone()
}
@@ -230,8 +290,20 @@ impl ConfigLoader for TomlConfigLoader {
.flatten()
}
fn set_ipv4(&self, addr: std::net::Ipv4Addr) {
self.config.lock().unwrap().ipv4 = Some(addr.to_string());
/// Sets the virtual IPv4 address; `None` clears it. Stored as a string
/// because the TOML `ipv4` field is textual.
fn set_ipv4(&self, addr: Option<std::net::Ipv4Addr>) {
    // Idiomatic Option::map instead of the manual if-let/else construction.
    self.config.lock().unwrap().ipv4 = addr.map(|addr| addr.to_string());
}
// Whether pseudo-DHCP is enabled; a missing key defaults to false.
fn get_dhcp(&self) -> bool {
self.config.lock().unwrap().dhcp.unwrap_or_default()
}
// Persist the pseudo-DHCP flag (always written explicitly, never cleared).
fn set_dhcp(&self, dhcp: bool) {
self.config.lock().unwrap().dhcp = Some(dhcp);
}
fn add_proxy_cidr(&self, cidr: cidr::IpCidr) {
@@ -393,6 +465,19 @@ impl ConfigLoader for TomlConfigLoader {
self.config.lock().unwrap().flags = Some(flags);
}
// Configured exit-node virtual addresses; empty when the key is absent.
fn get_exit_nodes(&self) -> Vec<Ipv4Addr> {
self.config
.lock()
.unwrap()
.exit_nodes
.clone()
.unwrap_or_default()
}
// Replace the full exit-node list (no merging with existing entries).
fn set_exit_nodes(&self, nodes: Vec<Ipv4Addr>) {
self.config.lock().unwrap().exit_nodes = Some(nodes);
}
fn dump(&self) -> String {
toml::to_string_pretty(&*self.config.lock().unwrap()).unwrap()
}

View File

@@ -0,0 +1,24 @@
#[doc(hidden)]
pub struct Defer<F: FnOnce()> {
    // Internal struct used by the defer! macro. The closure lives in an
    // `Option` so `Drop` can move it out (FnOnce consumes it) exactly once.
    func: Option<F>,
}

impl<F: FnOnce()> Defer<F> {
    /// Wraps `func` so it runs when the returned guard is dropped.
    pub fn new(func: F) -> Self {
        Self { func: Some(func) }
    }
}

impl<F: FnOnce()> Drop for Defer<F> {
    fn drop(&mut self) {
        // `if let` instead of `Option::map` on a unit closure
        // (clippy::option_map_unit_fn); `take()` guarantees at-most-once.
        if let Some(f) = self.func.take() {
            f();
        }
    }
}

/// Runs the given statements when the enclosing scope exits.
#[macro_export]
macro_rules! defer {
    ( $($tt:tt)* ) => {
        let _deferred = $crate::common::defer::Defer::new(|| { $($tt)* });
    };
}

View File

@@ -27,14 +27,18 @@ pub enum GlobalCtxEvent {
PeerConnRemoved(PeerConnInfo),
ListenerAdded(url::Url),
ConnectionAccepted(String, String), // (local url, remote url)
ListenerAddFailed(url::Url, String), // (url, error message)
ConnectionAccepted(String, String), // (local url, remote url)
ConnectionError(String, String, String), // (local url, remote url, error message)
Connecting(url::Url),
ConnectError(String, String), // (dst, error message)
ConnectError(String, String, String), // (dst, ip version, error message)
VpnPortalClientConnected(String, String), // (portal, client ip)
VpnPortalClientDisconnected(String, String), // (portal, client ip)
DhcpIpv4Changed(Option<std::net::Ipv4Addr>, Option<std::net::Ipv4Addr>), // (old, new)
DhcpIpv4Conflicted(Option<std::net::Ipv4Addr>),
}
type EventBus = tokio::sync::broadcast::Sender<GlobalCtxEvent>;
@@ -54,11 +58,13 @@ pub struct GlobalCtx {
ip_collector: Arc<IPCollector>,
hotname: AtomicCell<Option<String>>,
hostname: String,
stun_info_collection: Box<dyn StunInfoCollectorTrait>,
running_listeners: Mutex<Vec<url::Url>>,
enable_exit_node: bool,
}
impl std::fmt::Debug for GlobalCtx {
@@ -80,9 +86,14 @@ impl GlobalCtx {
let id = config_fs.get_id();
let network = config_fs.get_network_identity();
let net_ns = NetNS::new(config_fs.get_netns());
let hostname = config_fs.get_hostname();
let (event_bus, _) = tokio::sync::broadcast::channel(100);
let stun_info_collection = Arc::new(StunInfoCollector::new_with_default_servers());
let enable_exit_node = config_fs.get_flags().enable_exit_node;
GlobalCtx {
inst_name: config_fs.get_inst_name(),
id,
@@ -94,13 +105,15 @@ impl GlobalCtx {
cached_ipv4: AtomicCell::new(None),
cached_proxy_cidrs: AtomicCell::new(None),
ip_collector: Arc::new(IPCollector::new(net_ns)),
ip_collector: Arc::new(IPCollector::new(net_ns, stun_info_collection.clone())),
hotname: AtomicCell::new(None),
hostname,
stun_info_collection: Box::new(StunInfoCollector::new_with_default_servers()),
stun_info_collection: Box::new(stun_info_collection),
running_listeners: Mutex::new(Vec::new()),
enable_exit_node,
}
}
@@ -125,7 +138,7 @@ impl GlobalCtx {
return addr;
}
pub fn set_ipv4(&mut self, addr: std::net::Ipv4Addr) {
pub fn set_ipv4(&self, addr: Option<std::net::Ipv4Addr>) {
self.config.set_ipv4(addr);
self.cached_ipv4.store(None);
}
@@ -165,15 +178,8 @@ impl GlobalCtx {
self.ip_collector.clone()
}
pub fn get_hostname(&self) -> Option<String> {
if let Some(hostname) = self.hotname.take() {
self.hotname.store(Some(hostname.clone()));
return Some(hostname);
}
let hostname = gethostname::gethostname().to_string_lossy().to_string();
self.hotname.store(Some(hostname.clone()));
return Some(hostname);
pub fn get_hostname(&self) -> String {
return self.hostname.clone();
}
pub fn get_stun_info_collector(&self) -> impl StunInfoCollectorTrait + '_ {
@@ -224,6 +230,10 @@ impl GlobalCtx {
hasher.write(&key[0..16]);
key
}
pub fn enable_exit_node(&self) -> bool {
self.enable_exit_node
}
}
#[cfg(test)]

View File

@@ -30,6 +30,7 @@ pub trait IfConfiguerTrait: Send + Sync {
async fn wait_interface_show(&self, _name: &str) -> Result<(), Error> {
return Ok(());
}
async fn set_mtu(&self, _name: &str, _mtu: u32) -> Result<(), Error>;
}
fn cidr_to_subnet_mask(prefix_length: u8) -> Ipv4Addr {
@@ -49,21 +50,36 @@ fn cidr_to_subnet_mask(prefix_length: u8) -> Ipv4Addr {
}
async fn run_shell_cmd(cmd: &str) -> Result<(), Error> {
let cmd_out = if cfg!(target_os = "windows") {
Command::new("cmd").arg("/C").arg(cmd).output().await?
} else {
Command::new("sh").arg("-c").arg(cmd).output().await?
let cmd_out: std::process::Output;
let stdout: String;
let stderr: String;
#[cfg(target_os = "windows")]
{
const CREATE_NO_WINDOW: u32 = 0x08000000;
cmd_out = Command::new("cmd")
.stdin(std::process::Stdio::null())
.arg("/C")
.arg(cmd)
.creation_flags(CREATE_NO_WINDOW)
.output()
.await?;
stdout = crate::utils::utf8_or_gbk_to_string(cmd_out.stdout.as_slice());
stderr = crate::utils::utf8_or_gbk_to_string(cmd_out.stderr.as_slice());
};
let stdout = String::from_utf8_lossy(cmd_out.stdout.as_slice());
let stderr = String::from_utf8_lossy(cmd_out.stderr.as_slice());
#[cfg(not(target_os = "windows"))]
{
cmd_out = Command::new("sh").arg("-c").arg(cmd).output().await?;
stdout = String::from_utf8_lossy(cmd_out.stdout.as_slice()).to_string();
stderr = String::from_utf8_lossy(cmd_out.stderr.as_slice()).to_string();
};
let ec = cmd_out.status.code();
let succ = cmd_out.status.success();
tracing::info!(?cmd, ?ec, ?succ, ?stdout, ?stderr, "run shell cmd");
if !cmd_out.status.success() {
return Err(Error::ShellCommandError(
stdout.to_string() + &stderr.to_string(),
));
return Err(Error::ShellCommandError(stdout + &stderr));
}
Ok(())
}
@@ -138,6 +154,10 @@ impl IfConfiguerTrait for MacIfConfiger {
.await
}
}
async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
run_shell_cmd(format!("ifconfig {} mtu {}", name, mtu).as_str()).await
}
}
pub struct LinuxIfConfiger {}
@@ -194,6 +214,10 @@ impl IfConfiguerTrait for LinuxIfConfiger {
.await
}
}
async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
run_shell_cmd(format!("ip link set dev {} mtu {}", name, mtu).as_str()).await
}
}
#[cfg(target_os = "windows")]
@@ -346,6 +370,13 @@ impl IfConfiguerTrait for WindowsIfConfiger {
.await??,
)
}
async fn set_mtu(&self, name: &str, mtu: u32) -> Result<(), Error> {
run_shell_cmd(
format!("netsh interface ipv4 set subinterface {} mtu={}", name, mtu).as_str(),
)
.await
}
}
#[cfg(target_os = "macos")]

View File

@@ -8,12 +8,12 @@ use tracing::Instrument;
pub mod config;
pub mod constants;
pub mod defer;
pub mod error;
pub mod global_ctx;
pub mod ifcfg;
pub mod netns;
pub mod network;
pub mod rkyv_util;
pub mod stun;
pub mod stun_codec_ext;

View File

@@ -75,7 +75,7 @@ impl NetNSGuard {
}
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct NetNS {
name: Option<String>,
}

View File

@@ -1,4 +1,4 @@
use std::{ops::Deref, sync::Arc};
use std::{net::IpAddr, ops::Deref, sync::Arc};
use crate::rpc::peer::GetIpListResponse;
use pnet::datalink::NetworkInterface;
@@ -7,7 +7,7 @@ use tokio::{
task::JoinSet,
};
use super::netns::NetNS;
use super::{netns::NetNS, stun::StunInfoCollectorTrait};
pub const CACHED_IP_LIST_TIMEOUT_SEC: u64 = 60;
@@ -142,14 +142,16 @@ pub struct IPCollector {
cached_ip_list: Arc<RwLock<GetIpListResponse>>,
collect_ip_task: Mutex<JoinSet<()>>,
net_ns: NetNS,
stun_info_collector: Arc<Box<dyn StunInfoCollectorTrait>>,
}
impl IPCollector {
pub fn new(net_ns: NetNS) -> Self {
pub fn new<T: StunInfoCollectorTrait + 'static>(net_ns: NetNS, stun_info_collector: T) -> Self {
Self {
cached_ip_list: Arc::new(RwLock::new(GetIpListResponse::new())),
collect_ip_task: Mutex::new(JoinSet::new()),
net_ns,
stun_info_collector: Arc::new(Box::new(stun_info_collector)),
}
}
@@ -158,16 +160,41 @@ impl IPCollector {
if task.is_empty() {
let cached_ip_list = self.cached_ip_list.clone();
*cached_ip_list.write().await =
Self::do_collect_ip_addrs(false, self.net_ns.clone()).await;
Self::do_collect_local_ip_addrs(self.net_ns.clone()).await;
let net_ns = self.net_ns.clone();
let stun_info_collector = self.stun_info_collector.clone();
task.spawn(async move {
loop {
let ip_addrs = Self::do_collect_ip_addrs(true, net_ns.clone()).await;
let ip_addrs = Self::do_collect_local_ip_addrs(net_ns.clone()).await;
*cached_ip_list.write().await = ip_addrs;
tokio::time::sleep(std::time::Duration::from_secs(CACHED_IP_LIST_TIMEOUT_SEC))
.await;
}
});
let cached_ip_list = self.cached_ip_list.clone();
task.spawn(async move {
loop {
let stun_info = stun_info_collector.get_stun_info();
for ip in stun_info.public_ip.iter() {
let Ok(ip_addr) = ip.parse::<IpAddr>() else {
continue;
};
if ip_addr.is_ipv4() {
cached_ip_list.write().await.public_ipv4 = ip.clone();
} else {
cached_ip_list.write().await.public_ipv6 = ip.clone();
}
}
let sleep_sec = if !cached_ip_list.read().await.public_ipv4.is_empty() {
CACHED_IP_LIST_TIMEOUT_SEC
} else {
3
};
tokio::time::sleep(std::time::Duration::from_secs(sleep_sec)).await;
}
});
}
return self.cached_ip_list.read().await.deref().clone();
@@ -193,21 +220,9 @@ impl IPCollector {
}
#[tracing::instrument(skip(net_ns))]
async fn do_collect_ip_addrs(with_public: bool, net_ns: NetNS) -> GetIpListResponse {
async fn do_collect_local_ip_addrs(net_ns: NetNS) -> GetIpListResponse {
let mut ret = crate::rpc::peer::GetIpListResponse::new();
if with_public {
if let Some(v4_addr) =
public_ip::addr_with(public_ip::http::ALL, public_ip::Version::V4).await
{
ret.public_ipv4 = v4_addr.to_string();
}
if let Some(v6_addr) = public_ip::addr_v6().await {
ret.public_ipv6 = v6_addr.to_string();
}
}
let ifaces = Self::collect_interfaces(net_ns.clone()).await;
let _g = net_ns.guard();
for iface in ifaces {

View File

@@ -1,72 +0,0 @@
use rkyv::{
string::ArchivedString,
validation::{validators::DefaultValidator, CheckTypeError},
vec::ArchivedVec,
Archive, CheckBytes, Serialize,
};
use tokio_util::bytes::{Bytes, BytesMut};
pub fn decode_from_bytes_checked<'a, T: Archive>(
bytes: &'a [u8],
) -> Result<&'a T::Archived, CheckTypeError<T::Archived, DefaultValidator<'a>>>
where
T::Archived: CheckBytes<DefaultValidator<'a>>,
{
rkyv::check_archived_root::<T>(bytes)
}
pub fn decode_from_bytes<'a, T: Archive>(
bytes: &'a [u8],
) -> Result<&'a T::Archived, CheckTypeError<T::Archived, DefaultValidator<'a>>>
where
T::Archived: CheckBytes<DefaultValidator<'a>>,
{
// rkyv::check_archived_root::<T>(bytes)
unsafe { Ok(rkyv::archived_root::<T>(bytes)) }
}
// allow deseraial T to Bytes
pub fn encode_to_bytes<T, const N: usize>(val: &T) -> Bytes
where
T: Serialize<rkyv::ser::serializers::AllocSerializer<N>>,
{
let ret = rkyv::to_bytes::<_, N>(val).unwrap();
// let mut r = BytesMut::new();
// r.extend_from_slice(&ret);
// r.freeze()
ret.into_boxed_slice().into()
}
pub fn extract_bytes_from_archived_vec(raw_data: &Bytes, archived_data: &ArchivedVec<u8>) -> Bytes {
let ptr_range = archived_data.as_ptr_range();
let offset = ptr_range.start as usize - raw_data.as_ptr() as usize;
let len = ptr_range.end as usize - ptr_range.start as usize;
return raw_data.slice(offset..offset + len);
}
pub fn extract_bytes_from_archived_string(
raw_data: &Bytes,
archived_data: &ArchivedString,
) -> Bytes {
let offset = archived_data.as_ptr() as usize - raw_data.as_ptr() as usize;
let len = archived_data.len();
if offset + len > raw_data.len() {
return Bytes::new();
}
return raw_data.slice(offset..offset + archived_data.len());
}
pub fn extract_bytes_mut_from_archived_vec(
raw_data: &mut BytesMut,
archived_data: &ArchivedVec<u8>,
) -> BytesMut {
let ptr_range = archived_data.as_ptr_range();
let offset = ptr_range.start as usize - raw_data.as_ptr() as usize;
let len = ptr_range.end as usize - ptr_range.start as usize;
raw_data.split_off(offset).split_to(len)
}
pub fn vec_to_string(vec: Vec<u8>) -> String {
unsafe { String::from_utf8_unchecked(vec) }
}

View File

@@ -1,18 +1,20 @@
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use std::collections::BTreeSet;
use std::net::{IpAddr, SocketAddr};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use crate::rpc::{NatType, StunInfo};
use anyhow::Context;
use chrono::Local;
use crossbeam::atomic::AtomicCell;
use rand::seq::IteratorRandom;
use tokio::net::{lookup_host, UdpSocket};
use tokio::sync::RwLock;
use tokio::sync::{broadcast, Mutex};
use tokio::task::JoinSet;
use tracing::Level;
use tracing::{Instrument, Level};
use bytecodec::{DecodeExt, EncodeExt};
use stun_codec::rfc5389::methods::BINDING;
use stun_codec::rfc5780::attributes::ChangeRequest;
use stun_codec::{Message, MessageClass, MessageDecoder, MessageEncoder};
use crate::common::error::Error;
@@ -22,13 +24,15 @@ use super::stun_codec_ext::*;
struct HostResolverIter {
hostnames: Vec<String>,
ips: Vec<SocketAddr>,
max_ip_per_domain: u32,
}
impl HostResolverIter {
fn new(hostnames: Vec<String>) -> Self {
fn new(hostnames: Vec<String>, max_ip_per_domain: u32) -> Self {
Self {
hostnames,
ips: vec![],
max_ip_per_domain,
}
}
@@ -40,9 +44,17 @@ impl HostResolverIter {
}
let host = self.hostnames.remove(0);
let host = if host.contains(':') {
host
} else {
format!("{}:3478", host)
};
match lookup_host(&host).await {
Ok(ips) => {
self.ips = ips.collect();
self.ips = ips
.filter(|x| x.is_ipv4())
.choose_multiple(&mut rand::thread_rng(), self.max_ip_per_domain as usize);
}
Err(e) => {
tracing::warn!(?host, ?e, "lookup host for stun failed");
@@ -55,19 +67,30 @@ impl HostResolverIter {
}
}
#[derive(Debug, Clone)]
struct StunPacket {
data: Vec<u8>,
addr: SocketAddr,
}
type StunPacketReceiver = tokio::sync::broadcast::Receiver<StunPacket>;
#[derive(Debug, Clone, Copy)]
struct BindRequestResponse {
source_addr: SocketAddr,
send_to_addr: SocketAddr,
local_addr: SocketAddr,
stun_server_addr: SocketAddr,
recv_from_addr: SocketAddr,
mapped_socket_addr: Option<SocketAddr>,
changed_socket_addr: Option<SocketAddr>,
ip_changed: bool,
port_changed: bool,
change_ip: bool,
change_port: bool,
real_ip_changed: bool,
real_port_changed: bool,
latency_us: u32,
}
impl BindRequestResponse {
@@ -77,18 +100,26 @@ impl BindRequestResponse {
}
#[derive(Debug, Clone)]
struct Stun {
struct StunClient {
stun_server: SocketAddr,
req_repeat: u8,
resp_timeout: Duration,
req_repeat: u32,
socket: Arc<UdpSocket>,
stun_packet_receiver: Arc<Mutex<StunPacketReceiver>>,
}
impl Stun {
pub fn new(stun_server: SocketAddr) -> Self {
impl StunClient {
pub fn new(
stun_server: SocketAddr,
socket: Arc<UdpSocket>,
stun_packet_receiver: StunPacketReceiver,
) -> Self {
Self {
stun_server,
req_repeat: 2,
resp_timeout: Duration::from_millis(3000),
req_repeat: 2,
socket,
stun_packet_receiver: Arc::new(Mutex::new(stun_packet_receiver)),
}
}
@@ -96,7 +127,6 @@ impl Stun {
async fn wait_stun_response<'a, const N: usize>(
&self,
buf: &'a mut [u8; N],
udp: &UdpSocket,
tids: &Vec<u128>,
expected_ip_changed: bool,
expected_port_changed: bool,
@@ -106,16 +136,20 @@ impl Stun {
let deadline = now + self.resp_timeout;
while now < deadline {
let mut udp_buf = [0u8; 1500];
let (len, remote_addr) =
tokio::time::timeout(deadline - now, udp.recv_from(udp_buf.as_mut_slice()))
.await??;
let mut locked_receiver = self.stun_packet_receiver.lock().await;
let stun_packet_raw = tokio::time::timeout(deadline - now, locked_receiver.recv())
.await?
.with_context(|| "recv stun packet from broadcast channel error")?;
now = tokio::time::Instant::now();
let (len, remote_addr) = (stun_packet_raw.data.len(), stun_packet_raw.addr);
if len < 20 {
continue;
}
let udp_buf = stun_packet_raw.data;
// TODO:: we cannot borrow `buf` directly in udp recv_from, so we copy it here
unsafe { std::ptr::copy(udp_buf.as_ptr(), buf.as_ptr() as *mut u8, len) };
@@ -136,18 +170,6 @@ impl Stun {
continue;
}
// some stun server use changed socket even we don't ask for.
if expected_ip_changed && stun_host.ip() == remote_addr.ip() {
continue;
}
if expected_port_changed
&& stun_host.ip() == remote_addr.ip()
&& stun_host.port() == remote_addr.port()
{
continue;
}
return Ok((msg, remote_addr));
}
@@ -196,16 +218,14 @@ impl Stun {
#[tracing::instrument(ret, err, level = Level::DEBUG)]
pub async fn bind_request(
&self,
source_port: u16,
self,
change_ip: bool,
change_port: bool,
) -> Result<BindRequestResponse, Error> {
let stun_host = self.stun_server;
let udp = UdpSocket::bind(format!("0.0.0.0:{}", source_port)).await?;
// repeat req in case of packet loss
let mut tids = vec![];
for _ in 0..self.req_repeat {
let tid = rand::random::<u32>();
// let tid = 1;
@@ -222,16 +242,19 @@ impl Stun {
let msg = encoder
.encode_into_bytes(message.clone())
.with_context(|| "encode stun message")?;
tids.push(tid as u128);
tracing::trace!(?message, ?msg, tid, "send stun request");
udp.send_to(msg.as_slice().into(), &stun_host).await?;
tracing::debug!(?message, ?msg, tid, "send stun request");
self.socket
.send_to(msg.as_slice().into(), &stun_host)
.await?;
}
let now = Instant::now();
tracing::trace!("waiting stun response");
let mut buf = [0; 1620];
let (msg, recv_addr) = self
.wait_stun_response(&mut buf, &udp, &tids, change_ip, change_port, &stun_host)
.wait_stun_response(&mut buf, &tids, change_ip, change_port, &stun_host)
.await?;
let changed_socket_addr = Self::extract_changed_addr(&msg);
@@ -239,16 +262,18 @@ impl Stun {
let real_port_changed = stun_host.port() != recv_addr.port();
let resp = BindRequestResponse {
source_addr: udp.local_addr()?,
send_to_addr: stun_host,
local_addr: self.socket.local_addr()?,
stun_server_addr: stun_host,
recv_from_addr: recv_addr,
mapped_socket_addr: Self::extrace_mapped_addr(&msg),
changed_socket_addr,
ip_changed: change_ip,
port_changed: change_port,
change_ip,
change_port,
real_ip_changed,
real_port_changed,
latency_us: now.elapsed().as_micros() as u32,
};
tracing::debug!(
@@ -262,105 +287,256 @@ impl Stun {
}
}
pub struct UdpNatTypeDetector {
stun_servers: Vec<String>,
struct StunClientBuilder {
udp: Arc<UdpSocket>,
task_set: JoinSet<()>,
stun_packet_sender: broadcast::Sender<StunPacket>,
}
impl UdpNatTypeDetector {
pub fn new(stun_servers: Vec<String>) -> Self {
Self { stun_servers }
}
impl StunClientBuilder {
pub fn new(udp: Arc<UdpSocket>) -> Self {
let (stun_packet_sender, _) = broadcast::channel(1024);
let mut task_set = JoinSet::new();
pub async fn get_udp_nat_type(&self, mut source_port: u16) -> NatType {
// Like classic STUN (rfc3489). Detect NAT behavior for UDP.
// Modified from rfc3489. Requires at least two STUN servers.
let mut ret_test1_1 = None;
let mut ret_test1_2 = None;
let mut ret_test2 = None;
let mut ret_test3 = None;
if source_port == 0 {
let udp = UdpSocket::bind("0.0.0.0:0").await.unwrap();
source_port = udp.local_addr().unwrap().port();
}
let mut succ = false;
let mut ips = HostResolverIter::new(self.stun_servers.clone());
while let Some(server_ip) = ips.next().await {
let stun = Stun::new(server_ip.clone());
let ret = stun.bind_request(source_port, false, false).await;
if ret.is_err() {
// Try another STUN server
continue;
}
if ret_test1_1.is_none() {
ret_test1_1 = ret.ok();
continue;
}
ret_test1_2 = ret.ok();
let ret = stun.bind_request(source_port, true, true).await;
if let Ok(resp) = ret {
if !resp.real_ip_changed || !resp.real_port_changed {
tracing::debug!(
?server_ip,
?ret,
"stun bind request return with unchanged ip and port"
);
// Try another STUN server
continue;
let udp_clone = udp.clone();
let stun_packet_sender_clone = stun_packet_sender.clone();
task_set.spawn(
async move {
let mut buf = [0; 1620];
tracing::info!("start stun packet listener");
loop {
let Ok((len, addr)) = udp_clone.recv_from(&mut buf).await else {
tracing::error!("udp recv_from error");
break;
};
let data = buf[..len].to_vec();
tracing::debug!(?addr, ?data, "recv udp stun packet");
let _ = stun_packet_sender_clone.send(StunPacket { data, addr });
}
}
ret_test2 = ret.ok();
ret_test3 = stun.bind_request(source_port, false, true).await.ok();
tracing::debug!(?ret_test3, "stun bind request with changed port");
succ = true;
break;
}
.instrument(tracing::info_span!("stun_packet_listener")),
);
if !succ {
Self {
udp,
task_set,
stun_packet_sender,
}
}
pub fn new_stun_client(&self, stun_server: SocketAddr) -> StunClient {
StunClient::new(
stun_server,
self.udp.clone(),
self.stun_packet_sender.subscribe(),
)
}
pub async fn stop(&mut self) {
self.task_set.abort_all();
while let Some(_) = self.task_set.join_next().await {}
}
}
#[derive(Debug, Clone)]
pub struct UdpNatTypeDetectResult {
source_addr: SocketAddr,
stun_resps: Vec<BindRequestResponse>,
}
impl UdpNatTypeDetectResult {
fn new(source_addr: SocketAddr, stun_resps: Vec<BindRequestResponse>) -> Self {
Self {
source_addr,
stun_resps,
}
}
fn has_ip_changed_resp(&self) -> bool {
for resp in self.stun_resps.iter() {
if resp.real_ip_changed {
return true;
}
}
false
}
fn has_port_changed_resp(&self) -> bool {
for resp in self.stun_resps.iter() {
if resp.real_port_changed {
return true;
}
}
false
}
fn is_open_internet(&self) -> bool {
for resp in self.stun_resps.iter() {
if resp.mapped_socket_addr == Some(self.source_addr) {
return true;
}
}
return false;
}
fn is_pat(&self) -> bool {
for resp in self.stun_resps.iter() {
if resp.mapped_socket_addr.map(|x| x.port()) == Some(self.source_addr.port()) {
return true;
}
}
false
}
fn stun_server_count(&self) -> usize {
// find resp with distinct stun server
self.stun_resps
.iter()
.map(|x| x.stun_server_addr)
.collect::<BTreeSet<_>>()
.len()
}
fn is_cone(&self) -> bool {
// if unique mapped addr count is less than stun server count, it is cone
let mapped_addr_count = self
.stun_resps
.iter()
.filter_map(|x| x.mapped_socket_addr)
.collect::<BTreeSet<_>>()
.len();
mapped_addr_count < self.stun_server_count()
}
pub fn nat_type(&self) -> NatType {
if self.stun_server_count() < 2 {
return NatType::Unknown;
}
tracing::debug!(
?ret_test1_1,
?ret_test1_2,
?ret_test2,
?ret_test3,
"finish stun test, try to detect nat type"
);
let ret_test1_1 = ret_test1_1.unwrap();
let ret_test1_2 = ret_test1_2.unwrap();
if ret_test1_1.mapped_socket_addr != ret_test1_2.mapped_socket_addr {
return NatType::Symmetric;
}
if ret_test1_1.mapped_socket_addr.is_some()
&& ret_test1_1.source_addr == ret_test1_1.mapped_socket_addr.unwrap()
{
if !ret_test2.is_none() {
return NatType::OpenInternet;
} else {
return NatType::SymUdpFirewall;
}
} else {
if let Some(ret_test2) = ret_test2 {
if source_port == ret_test2.get_mapped_addr_no_check().port()
&& source_port == ret_test1_1.get_mapped_addr_no_check().port()
{
if self.is_cone() {
if self.has_ip_changed_resp() {
if self.is_open_internet() {
return NatType::OpenInternet;
} else if self.is_pat() {
return NatType::NoPat;
} else {
return NatType::FullCone;
}
} else if self.has_port_changed_resp() {
return NatType::Restricted;
} else {
if !ret_test3.is_none() {
return NatType::Restricted;
} else {
return NatType::PortRestricted;
}
return NatType::PortRestricted;
}
} else if !self.stun_resps.is_empty() {
return NatType::Symmetric;
} else {
return NatType::Unknown;
}
}
pub fn public_ips(&self) -> Vec<IpAddr> {
self.stun_resps
.iter()
.filter_map(|x| x.mapped_socket_addr.map(|x| x.ip()))
.collect::<BTreeSet<_>>()
.into_iter()
.collect()
}
pub fn collect_available_stun_server(&self) -> Vec<SocketAddr> {
let mut ret = vec![];
for resp in self.stun_resps.iter() {
if !ret.contains(&resp.stun_server_addr) {
ret.push(resp.stun_server_addr);
}
}
ret
}
pub fn local_addr(&self) -> SocketAddr {
self.source_addr
}
pub fn extend_result(&mut self, other: UdpNatTypeDetectResult) {
self.stun_resps.extend(other.stun_resps);
}
pub fn min_port(&self) -> u16 {
self.stun_resps
.iter()
.filter_map(|x| x.mapped_socket_addr.map(|x| x.port()))
.min()
.unwrap_or(0)
}
pub fn max_port(&self) -> u16 {
self.stun_resps
.iter()
.filter_map(|x| x.mapped_socket_addr.map(|x| x.port()))
.max()
.unwrap_or(u16::MAX)
}
}
pub struct UdpNatTypeDetector {
stun_server_hosts: Vec<String>,
max_ip_per_domain: u32,
}
impl UdpNatTypeDetector {
pub fn new(stun_server_hosts: Vec<String>, max_ip_per_domain: u32) -> Self {
Self {
stun_server_hosts,
max_ip_per_domain,
}
}
pub async fn detect_nat_type(&self, source_port: u16) -> Result<UdpNatTypeDetectResult, Error> {
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", source_port)).await?);
self.detect_nat_type_with_socket(udp).await
}
#[tracing::instrument(skip(self))]
pub async fn detect_nat_type_with_socket(
&self,
udp: Arc<UdpSocket>,
) -> Result<UdpNatTypeDetectResult, Error> {
let mut stun_servers = vec![];
let mut host_resolver =
HostResolverIter::new(self.stun_server_hosts.clone(), self.max_ip_per_domain);
while let Some(addr) = host_resolver.next().await {
stun_servers.push(addr);
}
let client_builder = StunClientBuilder::new(udp.clone());
let mut stun_task_set = JoinSet::new();
for stun_server in stun_servers.iter() {
stun_task_set.spawn(
client_builder
.new_stun_client(*stun_server)
.bind_request(false, false),
);
stun_task_set.spawn(
client_builder
.new_stun_client(*stun_server)
.bind_request(false, true),
);
stun_task_set.spawn(
client_builder
.new_stun_client(*stun_server)
.bind_request(true, true),
);
}
let mut bind_resps = vec![];
while let Some(resp) = stun_task_set.join_next().await {
if let Ok(Ok(resp)) = resp {
bind_resps.push(resp);
}
}
Ok(UdpNatTypeDetectResult::new(udp.local_addr()?, bind_resps))
}
}
@@ -373,7 +549,8 @@ pub trait StunInfoCollectorTrait: Send + Sync {
pub struct StunInfoCollector {
stun_servers: Arc<RwLock<Vec<String>>>,
udp_nat_type: Arc<AtomicCell<(NatType, std::time::Instant)>>,
udp_nat_test_result: Arc<RwLock<Option<UdpNatTypeDetectResult>>>,
nat_test_result_time: Arc<AtomicCell<chrono::DateTime<Local>>>,
redetect_notify: Arc<tokio::sync::Notify>,
tasks: JoinSet<()>,
}
@@ -381,27 +558,47 @@ pub struct StunInfoCollector {
#[async_trait::async_trait]
impl StunInfoCollectorTrait for StunInfoCollector {
fn get_stun_info(&self) -> StunInfo {
let (typ, time) = self.udp_nat_type.load();
let Some(result) = self.udp_nat_test_result.read().unwrap().clone() else {
return Default::default();
};
StunInfo {
udp_nat_type: typ as i32,
udp_nat_type: result.nat_type() as i32,
tcp_nat_type: 0,
last_update_time: time.elapsed().as_secs() as i64,
last_update_time: self.nat_test_result_time.load().timestamp(),
public_ip: result.public_ips().iter().map(|x| x.to_string()).collect(),
min_port: result.min_port() as u32,
max_port: result.max_port() as u32,
}
}
async fn get_udp_port_mapping(&self, local_port: u16) -> Result<SocketAddr, Error> {
let stun_servers = self.stun_servers.read().await.clone();
let mut ips = HostResolverIter::new(stun_servers.clone());
while let Some(server) = ips.next().await {
let stun = Stun::new(server.clone());
let Ok(ret) = stun.bind_request(local_port, false, false).await else {
let stun_servers = self
.udp_nat_test_result
.read()
.unwrap()
.clone()
.map(|x| x.collect_available_stun_server())
.ok_or(Error::NotFound)?;
let udp = Arc::new(UdpSocket::bind(format!("0.0.0.0:{}", local_port)).await?);
let mut client_builder = StunClientBuilder::new(udp.clone());
for server in stun_servers.iter() {
let Ok(ret) = client_builder
.new_stun_client(*server)
.bind_request(false, false)
.await
else {
tracing::warn!(?server, "stun bind request failed");
continue;
};
if let Some(mapped_addr) = ret.mapped_socket_addr {
// make sure udp socket is available after return ok.
client_builder.stop().await;
return Ok(mapped_addr);
}
}
Err(Error::NotFound)
}
}
@@ -410,10 +607,8 @@ impl StunInfoCollector {
pub fn new(stun_servers: Vec<String>) -> Self {
let mut ret = Self {
stun_servers: Arc::new(RwLock::new(stun_servers)),
udp_nat_type: Arc::new(AtomicCell::new((
NatType::Unknown,
std::time::Instant::now(),
))),
udp_nat_test_result: Arc::new(RwLock::new(None)),
nat_test_result_time: Arc::new(AtomicCell::new(Local::now())),
redetect_notify: Arc::new(tokio::sync::Notify::new()),
tasks: JoinSet::new(),
};
@@ -431,46 +626,78 @@ impl StunInfoCollector {
// NOTICE: we may need to choose stun stun server based on geo location
// stun server cross nation may return a external ip address with high latency and loss rate
vec![
"stun.miwifi.com:3478".to_string(),
"stun.chat.bilibili.com:3478".to_string(), // bilibili's stun server doesn't repond to change_ip and change_port
"stun.cloudflare.com:3478".to_string(),
"stun.syncthing.net:3478".to_string(),
"stun.isp.net.au:3478".to_string(),
"stun.nextcloud.com:3478".to_string(),
"stun.freeswitch.org:3478".to_string(),
"stun.voip.blackberry.com:3478".to_string(),
"stunserver.stunprotocol.org:3478".to_string(),
"stun.sipnet.com:3478".to_string(),
"stun.radiojar.com:3478".to_string(),
"stun.sonetel.com:3478".to_string(),
"stun.voipgate.com:3478".to_string(),
"stun.miwifi.com",
"stun.cdnbye.com",
"stun.hitv.com",
"stun.chat.bilibili.com",
"stun.douyucdn.cn:18000",
"fwa.lifesizecloud.com",
"global.turn.twilio.com",
"turn.cloudflare.com",
"stun.isp.net.au",
"stun.nextcloud.com",
"stun.freeswitch.org",
"stun.voip.blackberry.com",
"stunserver.stunprotocol.org",
"stun.sipnet.com",
"stun.radiojar.com",
"stun.sonetel.com",
]
.iter()
.map(|x| x.to_string())
.collect()
}
fn start_stun_routine(&mut self) {
let stun_servers = self.stun_servers.clone();
let udp_nat_type = self.udp_nat_type.clone();
let udp_nat_test_result = self.udp_nat_test_result.clone();
let udp_test_time = self.nat_test_result_time.clone();
let redetect_notify = self.redetect_notify.clone();
self.tasks.spawn(async move {
loop {
let detector = UdpNatTypeDetector::new(stun_servers.read().await.clone());
let old_nat_type = udp_nat_type.load().0;
let mut ret = NatType::Unknown;
for _ in 1..5 {
// if nat type degrade, sleep and retry. so result can be relatively stable.
ret = detector.get_udp_nat_type(0).await;
if ret == NatType::Unknown || ret <= old_nat_type {
break;
let servers = stun_servers.read().unwrap().clone();
// use first three and random choose one from the rest
let servers = servers
.iter()
.take(2)
.chain(servers.iter().skip(2).choose(&mut rand::thread_rng()))
.map(|x| x.to_string())
.collect();
let detector = UdpNatTypeDetector::new(servers, 1);
let ret = detector.detect_nat_type(0).await;
tracing::debug!(?ret, "finish udp nat type detect");
let mut nat_type = NatType::Unknown;
let sleep_sec = match &ret {
Ok(resp) => {
*udp_nat_test_result.write().unwrap() = Some(resp.clone());
udp_test_time.store(Local::now());
nat_type = resp.nat_type();
if nat_type == NatType::Unknown {
15
} else {
600
}
}
tokio::time::sleep(Duration::from_secs(5)).await;
}
udp_nat_type.store((ret, std::time::Instant::now()));
let sleep_sec = match ret {
NatType::Unknown => 15,
_ => 60,
_ => 15,
};
tracing::info!(?ret, ?sleep_sec, "finish udp nat type detect");
// if nat type is symmtric, detect with another port to gather more info
if nat_type == NatType::Symmetric {
let old_resp = ret.unwrap();
let old_local_port = old_resp.local_addr().port();
let new_port = if old_local_port >= 65535 {
old_local_port - 1
} else {
old_local_port + 1
};
let ret = detector.detect_nat_type(new_port).await;
tracing::debug!(?ret, "finish udp nat type detect with another port");
if let Ok(resp) = ret {
udp_nat_test_result.write().unwrap().as_mut().map(|x| {
x.extend_result(resp);
});
}
}
tokio::select! {
_ = redetect_notify.notified() => {}
@@ -483,53 +710,26 @@ impl StunInfoCollector {
pub fn update_stun_info(&self) {
self.redetect_notify.notify_one();
}
pub async fn set_stun_servers(&self, stun_servers: Vec<String>) {
*self.stun_servers.write().await = stun_servers;
self.update_stun_info();
}
}
#[cfg(test)]
mod tests {
use super::*;
pub fn enable_log() {
let filter = tracing_subscriber::EnvFilter::builder()
.with_default_directive(tracing::level_filters::LevelFilter::TRACE.into())
.from_env()
.unwrap()
.add_directive("tarpc=error".parse().unwrap());
tracing_subscriber::fmt::fmt()
.pretty()
.with_env_filter(filter)
.init();
}
#[tokio::test]
async fn test_stun_bind_request() {
// miwifi / qq seems not correctly responde to change_ip and change_port, they always try to change the src ip and port.
// let mut ips = HostResolverIter::new(vec!["stun1.l.google.com:19302".to_string()]);
let mut ips_ = HostResolverIter::new(vec!["stun.canets.org:3478".to_string()]);
let mut ips = vec![];
while let Some(ip) = ips_.next().await {
ips.push(ip);
async fn test_udp_nat_type_detector() {
let collector = StunInfoCollector::new_with_default_servers();
collector.update_stun_info();
loop {
let ret = collector.get_stun_info();
if ret.udp_nat_type != NatType::Unknown as i32 {
println!("{:#?}", ret);
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
println!("ip: {:?}", ips);
for ip in ips.iter() {
let stun = Stun::new(ip.clone());
let _rs = stun.bind_request(12345, true, true).await;
}
}
#[tokio::test]
async fn test_udp_nat_type_detect() {
let detector = UdpNatTypeDetector::new(vec![
"stun.counterpath.com:3478".to_string(),
"180.235.108.91:3478".to_string(),
]);
let ret = detector.get_udp_nat_type(0).await;
assert_ne!(ret, NatType::Unknown);
let port_mapping = collector.get_udp_port_mapping(3000).await;
println!("{:#?}", port_mapping);
}
}

View File

@@ -1,11 +1,12 @@
use std::net::SocketAddr;
use bytecodec::fixnum::{U32beDecoder, U32beEncoder};
use stun_codec::net::{socket_addr_xor, SocketAddrDecoder, SocketAddrEncoder};
use stun_codec::rfc5389::attributes::{
MappedAddress, Software, XorMappedAddress, XorMappedAddress2,
};
use stun_codec::rfc5780::attributes::{ChangeRequest, OtherAddress, ResponseOrigin};
use stun_codec::rfc5780::attributes::{OtherAddress, ResponseOrigin};
use stun_codec::{define_attribute_enums, AttributeType, Message, TransactionId};
use bytecodec::{ByteCount, Decode, Encode, Eos, Result, SizedEncode, TryTaggedDecode};
@@ -197,6 +198,75 @@ impl_encode!(SourceAddressEncoder, SourceAddress, |item: Self::Item| {
item.0
});
/// `CHANGE-REQUEST` attribute.
///
/// See [RFC 5780 -- 7.2. CHANGE-REQUEST] about this attribute.
///
/// [RFC 5780 -- 7.2. CHANGE-REQUEST]: https://tools.ietf.org/html/rfc5780#section-7.2
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct ChangeRequest(bool, bool);
impl ChangeRequest {
/// The codepoint of the type of the attribute.
pub const CODEPOINT: u16 = 0x0003;
/// Makes a new `ChangeRequest` instance.
pub fn new(ip: bool, port: bool) -> Self {
ChangeRequest(ip, port)
}
/// Returns whether the client requested the server to send the Binding Response with a
/// different IP address than the one the Binding Request was received on
pub fn ip(&self) -> bool {
self.0
}
/// Returns whether the client requested the server to send the Binding Response with a
/// different port than the one the Binding Request was received on
pub fn port(&self) -> bool {
self.1
}
}
impl stun_codec::Attribute for ChangeRequest {
type Decoder = ChangeRequestDecoder;
type Encoder = ChangeRequestEncoder;
fn get_type(&self) -> AttributeType {
AttributeType::new(Self::CODEPOINT)
}
}
/// [`ChangeRequest`] decoder.
#[derive(Debug, Default)]
pub struct ChangeRequestDecoder(U32beDecoder);
impl ChangeRequestDecoder {
/// Makes a new `ChangeRequestDecoder` instance.
pub fn new() -> Self {
Self::default()
}
}
impl_decode!(ChangeRequestDecoder, ChangeRequest, |item| {
Ok(ChangeRequest((item & 0x4) != 0, (item & 0x2) != 0))
});
/// [`ChangeRequest`] encoder.
#[derive(Debug, Default)]
pub struct ChangeRequestEncoder(U32beEncoder);
impl ChangeRequestEncoder {
/// Makes a new `ChangeRequestEncoder` instance.
pub fn new() -> Self {
Self::default()
}
}
impl_encode!(ChangeRequestEncoder, ChangeRequest, |item: Self::Item| {
let ip = item.0 as u8;
let port = item.1 as u8;
((ip << 1 | port) << 1) as u32
});
pub fn tid_to_u128(tid: &TransactionId) -> u128 {
let mut tid_buf = [0u8; 16];
// copy bytes from msg_tid to tid_buf

View File

@@ -167,7 +167,6 @@ impl ManualConnectorManager {
let mut reconn_interval = tokio::time::interval(std::time::Duration::from_millis(
use_global_var!(MANUAL_CONNECTOR_RECONNECT_INTERVAL_MS),
));
let mut reconn_tasks = JoinSet::new();
let (reconn_result_send, mut reconn_result_recv) = mpsc::channel(100);
loop {
@@ -176,8 +175,8 @@ impl ManualConnectorManager {
if let Ok(event) = event {
Self::handle_event(&event, data.clone()).await;
} else {
log::warn!("event_recv closed");
panic!("event_recv closed");
tracing::warn!(?event, "event_recv got error");
panic!("event_recv got error, err: {:?}", event);
}
}
@@ -192,16 +191,20 @@ impl ManualConnectorManager {
let (_, connector) = data.connectors.remove(&dead_url).unwrap();
let insert_succ = data.reconnecting.insert(dead_url.clone());
assert!(insert_succ);
reconn_tasks.spawn(async move {
sender.send(Self::conn_reconnect(data_clone.clone(), dead_url, connector).await).await.unwrap();
tokio::spawn(async move {
let reconn_ret = Self::conn_reconnect(data_clone.clone(), dead_url.clone(), connector.clone()).await;
sender.send(reconn_ret).await.unwrap();
data_clone.reconnecting.remove(&dead_url).unwrap();
data_clone.connectors.insert(dead_url.clone(), connector);
});
}
log::info!("reconn_interval tick, done");
}
ret = reconn_result_recv.recv() => {
log::warn!("reconn_tasks done, out: {:?}", ret);
let _ = reconn_tasks.join_next().await.unwrap();
log::warn!("reconn_tasks done, reconn result: {:?}", ret);
}
}
}
@@ -355,10 +358,13 @@ impl ManualConnectorManager {
} else if ret.as_ref().unwrap().is_err() {
reconn_ret = Err(ret.unwrap().unwrap_err());
}
data.global_ctx.issue_event(GlobalCtxEvent::ConnectError(
dead_url.clone(),
format!("{:?}", ip_version),
format!("{:?}", reconn_ret),
));
}
}
data.reconnecting.remove(&dead_url).unwrap();
data.connectors.insert(dead_url.clone(), connector);
reconn_ret
}

View File

@@ -3,16 +3,15 @@ use std::{
sync::Arc,
};
#[cfg(feature = "quic")]
use crate::tunnel::quic::QUICTunnelConnector;
#[cfg(feature = "wireguard")]
use crate::tunnel::wireguard::{WgConfig, WgTunnelConnector};
use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, network::IPCollector},
tunnel::{
check_scheme_and_get_socket_addr,
quic::QUICTunnelConnector,
ring::RingTunnelConnector,
tcp::TcpTunnelConnector,
udp::UdpTunnelConnector,
wireguard::{WgConfig, WgTunnelConnector},
TunnelConnector,
check_scheme_and_get_socket_addr, ring::RingTunnelConnector, tcp::TcpTunnelConnector,
udp::UdpTunnelConnector, FromUrl, IpVersion, TunnelConnector,
},
};
@@ -77,6 +76,7 @@ pub async fn create_connector_by_url(
let connector = RingTunnelConnector::new(url);
return Ok(Box::new(connector));
}
#[cfg(feature = "quic")]
"quic" => {
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "quic")?;
let mut connector = QUICTunnelConnector::new(url);
@@ -88,6 +88,7 @@ pub async fn create_connector_by_url(
.await;
return Ok(Box::new(connector));
}
#[cfg(feature = "wireguard")]
"wg" => {
let dst_addr = check_scheme_and_get_socket_addr::<SocketAddr>(&url, "wg")?;
let nid = global_ctx.get_network_identity();
@@ -104,6 +105,18 @@ pub async fn create_connector_by_url(
.await;
return Ok(Box::new(connector));
}
#[cfg(feature = "websocket")]
"ws" | "wss" => {
let dst_addr = SocketAddr::from_url(url.clone(), IpVersion::Both)?;
let mut connector = crate::tunnel::websocket::WSTunnelConnector::new(url);
set_bind_addr_for_peer_connector(
&mut connector,
dst_addr.is_ipv4(),
&global_ctx.get_ip_collector(),
)
.await;
return Ok(Box::new(connector));
}
_ => {
return Err(Error::InvalidUrl(url.into()));
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,11 @@
#![allow(dead_code)]
use std::{net::SocketAddr, vec};
use std::{net::SocketAddr, time::Duration, vec};
use clap::{command, Args, Parser, Subcommand};
use common::stun::StunInfoCollectorTrait;
use rpc::vpn_portal_rpc_client::VpnPortalRpcClient;
use tokio::time::timeout;
use utils::{list_peer_route_pair, PeerRoutePair};
mod arch;
@@ -13,7 +15,7 @@ mod tunnel;
mod utils;
use crate::{
common::stun::{StunInfoCollector, UdpNatTypeDetector},
common::stun::StunInfoCollector,
rpc::{
connector_manage_rpc_client::ConnectorManageRpcClient,
peer_center_rpc_client::PeerCenterRpcClient, peer_manage_rpc_client::PeerManageRpcClient,
@@ -25,7 +27,7 @@ use humansize::format_size;
use tabled::settings::Style;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
#[command(name = "easytier-cli", author, version, about, long_about = None)]
struct Cli {
/// the instance name
#[arg(short = 'p', long, default_value = "127.0.0.1:15888")]
@@ -309,8 +311,19 @@ async fn main() -> Result<(), Error> {
handler.handle_route_list().await?;
}
SubCommand::Stun => {
let stun = UdpNatTypeDetector::new(StunInfoCollector::get_default_servers());
println!("udp type: {:?}", stun.get_udp_nat_type(0).await);
timeout(Duration::from_secs(5), async move {
let collector = StunInfoCollector::new_with_default_servers();
loop {
let ret = collector.get_stun_info();
if ret.udp_nat_type != NatType::Unknown as i32 {
println!("stun info: {:#?}", ret);
break;
}
tokio::time::sleep(Duration::from_millis(200)).await;
}
})
.await
.unwrap();
}
SubCommand::PeerCenter => {
let mut peer_center_client = handler.get_peer_center_client().await?;
@@ -331,13 +344,7 @@ async fn main() -> Result<(), Error> {
let direct_peers = v
.direct_peers
.iter()
.map(|(k, v)| {
format!(
"{}:{:?}",
k,
LatencyLevel::try_from(v.latency_level).unwrap()
)
})
.map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,))
.collect::<Vec<_>>();
table_rows.push(PeerCenterTableItem {
node_id: node_id.to_string(),
@@ -360,8 +367,15 @@ async fn main() -> Result<(), Error> {
.into_inner()
.vpn_portal_info
.unwrap_or_default();
println!("portal_name: {}\n", resp.vpn_type);
println!("client_config:{}", resp.client_config);
println!("portal_name: {}", resp.vpn_type);
println!(
r#"
############### client_config_start ###############
{}
############### client_config_end ###############
"#,
resp.client_config
);
println!("connected_clients:\n{:#?}", resp.connected_clients);
}
}

View File

@@ -3,7 +3,12 @@
#[cfg(test)]
mod tests;
use std::{backtrace, io::Write as _, net::SocketAddr};
use std::{
backtrace,
io::Write as _,
net::{Ipv4Addr, SocketAddr},
path::PathBuf,
};
use anyhow::Context;
use clap::Parser;
@@ -17,29 +22,40 @@ mod peer_center;
mod peers;
mod rpc;
mod tunnel;
mod utils;
mod vpn_portal;
use common::{
config::{ConsoleLoggerConfig, FileLoggerConfig, NetworkIdentity, PeerConfig, VpnPortalConfig},
get_logger_timer_rfc3339,
use common::config::{
ConsoleLoggerConfig, FileLoggerConfig, NetworkIdentity, PeerConfig, VpnPortalConfig,
};
use instance::instance::Instance;
use tracing::level_filters::LevelFilter;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
use tokio::net::TcpSocket;
use crate::common::{
config::{ConfigLoader, TomlConfigLoader},
global_ctx::GlobalCtxEvent,
use crate::{
common::{
config::{ConfigLoader, TomlConfigLoader},
global_ctx::GlobalCtxEvent,
},
utils::init_logger,
};
#[cfg(feature = "mimalloc")]
use mimalloc_rust::*;
#[cfg(feature = "mimalloc")]
#[global_allocator]
static GLOBAL_MIMALLOC: GlobalMiMalloc = GlobalMiMalloc;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
#[command(name = "easytier-core", author, version, about, long_about = None)]
struct Cli {
#[arg(
short,
long,
help = "path to the config file, NOTE: if this is set, all other options will be ignored"
)]
config_file: Option<PathBuf>,
#[arg(
long,
help = "network name to identify this vpn network",
@@ -53,10 +69,21 @@ struct Cli {
)]
network_secret: String,
#[arg(short, long, help = "ipv4 address of this vpn node")]
#[arg(
short,
long,
help = "ipv4 address of this vpn node, if empty, this node will only forward packets and no TUN device will be created"
)]
ipv4: Option<String>,
#[arg(short, long, help = "peers to connect initially")]
#[arg(
short,
long,
help = "automatically determine and set IP address by Easytier, and the IP address starts from 10.0.0.1 by default. Warning, if there is an IP conflict in the network when using DHCP, the IP will be automatically changed."
)]
dhcp: bool,
#[arg(short, long, help = "peers to connect initially", num_args = 0..)]
peers: Vec<String>,
#[arg(short, long, help = "use a public shared node to discover peers")]
@@ -72,20 +99,28 @@ struct Cli {
#[arg(
short,
long,
default_value = "127.0.0.1:15888",
help = "rpc portal address to listen for management"
default_value = "0",
help = "rpc portal address to listen for management. 0 means random
port, 12345 means listen on 12345 of localhost, 0.0.0.0:12345 means
listen on 12345 of all interfaces. default is 0 and will try 15888 first"
)]
rpc_portal: SocketAddr,
rpc_portal: String,
#[arg(short, long, help = "listeners to accept connections, pass '' to avoid listening.",
default_values_t = ["tcp://0.0.0.0:11010".to_string(),
"udp://0.0.0.0:11010".to_string(),
"wg://0.0.0.0:11011".to_string()])]
#[arg(short, long, help = "listeners to accept connections, allow format:
a port number: 11010, means tcp/udp will listen on 11010, ws/wss will listen on 11010 and 11011, wg will listen on 11011
url: tcp://0.0.0.0:11010, tcp can be tcp, udp, ring, wg, ws, wss,
proto:port: wg:11011, means listen on 11011 with wireguard protocol
url and proto:port can occur multiple times.
", default_values_t = ["11010".to_string()],
num_args = 0..)]
listeners: Vec<String>,
/// specify the linux network namespace, default is the root namespace
#[arg(long)]
net_ns: Option<String>,
#[arg(
long,
help = "do not listen on any port, only connect to peers",
default_value = "false"
)]
no_listener: bool,
#[arg(long, help = "console log level",
value_parser = clap::builder::PossibleValuesParser::new(["trace", "debug", "info", "warn", "error", "off"]))]
@@ -97,6 +132,9 @@ struct Cli {
#[arg(long, help = "directory to store log files")]
file_log_dir: Option<String>,
#[arg(long, help = "host name to identify this device")]
hostname: Option<String>,
#[arg(
short = 'm',
long,
@@ -105,13 +143,6 @@ struct Cli {
)]
instance_name: String,
#[arg(
short = 'd',
long,
help = "instance uuid to identify this vpn node in whole vpn network example: 123e4567-e89b-12d3-a456-426614174000"
)]
instance_id: Option<String>,
#[arg(
long,
help = "url that defines the vpn portal, allow other vpn clients to connect.
@@ -140,24 +171,142 @@ and the vpn client is in network of 10.14.14.0/24"
#[arg(long, help = "do not use ipv6", default_value = "false")]
disable_ipv6: bool,
#[arg(
long,
help = "mtu of the TUN device, default is 1420 for non-encryption, 1400 for encryption"
)]
mtu: Option<u16>,
#[arg(
long,
help = "path to the log file, if not set, will print to stdout",
default_value = "false"
)]
latency_first: bool,
#[arg(
long,
help = "exit nodes to forward all traffic to, a virtual ipv4 address, priority is determined by the order of the list",
num_args = 0..
)]
exit_nodes: Vec<Ipv4Addr>,
#[arg(
long,
help = "allow this node to be an exit node, default is false",
default_value = "false"
)]
enable_exit_node: bool,
}
impl Cli {
fn parse_listeners(&self) -> Vec<String> {
println!("parsing listeners: {:?}", self.listeners);
let proto_port_offset = vec![("tcp", 0), ("udp", 0), ("wg", 1), ("ws", 1), ("wss", 2)];
if self.no_listener || self.listeners.is_empty() {
return vec![];
}
let origin_listners = self.listeners.clone();
let mut listeners: Vec<String> = Vec::new();
if origin_listners.len() == 1 {
if let Ok(port) = origin_listners[0].parse::<u16>() {
for (proto, offset) in proto_port_offset {
listeners.push(format!("{}://0.0.0.0:{}", proto, port + offset));
}
return listeners;
}
}
for l in &origin_listners {
let proto_port: Vec<&str> = l.split(':').collect();
if proto_port.len() > 2 {
if let Ok(url) = l.parse::<url::Url>() {
listeners.push(url.to_string());
} else {
panic!("failed to parse listener: {}", l);
}
} else {
let Some((proto, offset)) = proto_port_offset
.iter()
.find(|(proto, _)| *proto == proto_port[0])
else {
panic!("unknown protocol: {}", proto_port[0]);
};
let port = if proto_port.len() == 2 {
proto_port[1].parse::<u16>().unwrap()
} else {
11010 + offset
};
listeners.push(format!("{}://0.0.0.0:{}", proto, port));
}
}
println!("parsed listeners: {:?}", listeners);
listeners
}
fn check_tcp_available(port: u16) -> Option<SocketAddr> {
let s = format!("127.0.0.1:{}", port).parse::<SocketAddr>().unwrap();
TcpSocket::new_v4().unwrap().bind(s).map(|_| s).ok()
}
fn parse_rpc_portal(&self) -> SocketAddr {
if let Ok(port) = self.rpc_portal.parse::<u16>() {
if port == 0 {
// check tcp 15888 first
for i in 15888..15900 {
if let Some(s) = Cli::check_tcp_available(i) {
return s;
}
}
return "127.0.0.1:0".parse().unwrap();
}
return format!("127.0.0.1:{}", port).parse().unwrap();
}
self.rpc_portal.parse().unwrap()
}
}
impl From<Cli> for TomlConfigLoader {
fn from(cli: Cli) -> Self {
if let Some(config_file) = &cli.config_file {
println!(
"NOTICE: loading config file: {:?}, will ignore all command line flags\n",
config_file
);
return TomlConfigLoader::new(config_file)
.with_context(|| format!("failed to load config file: {:?}", cli.config_file))
.unwrap();
}
let cfg = TomlConfigLoader::default();
cfg.set_inst_name(cli.instance_name.clone());
cfg.set_hostname(cli.hostname.clone());
cfg.set_network_identity(NetworkIdentity::new(
cli.network_name.clone(),
cli.network_secret.clone(),
));
cfg.set_netns(cli.net_ns.clone());
if let Some(ipv4) = &cli.ipv4 {
cfg.set_ipv4(
ipv4.parse()
.with_context(|| format!("failed to parse ipv4 address: {}", ipv4))
.unwrap(),
)
cfg.set_dhcp(cli.dhcp);
if !cli.dhcp {
if let Some(ipv4) = &cli.ipv4 {
cfg.set_ipv4(Some(
ipv4.parse()
.with_context(|| format!("failed to parse ipv4 address: {}", ipv4))
.unwrap(),
))
}
}
cfg.set_peers(
@@ -173,19 +322,9 @@ impl From<Cli> for TomlConfigLoader {
);
cfg.set_listeners(
cli.listeners
.iter()
.filter_map(|s| {
if s.is_empty() {
return None;
}
Some(
s.parse()
.with_context(|| format!("failed to parse listener uri: {}", s))
.unwrap(),
)
})
cli.parse_listeners()
.into_iter()
.map(|s| s.parse().unwrap())
.collect(),
);
@@ -197,7 +336,7 @@ impl From<Cli> for TomlConfigLoader {
);
}
cfg.set_rpc_portal(cli.rpc_portal);
cfg.set_rpc_portal(cli.parse_rpc_portal());
if cli.external_node.is_some() {
let mut old_peers = cfg.get_peers();
@@ -270,64 +409,19 @@ impl From<Cli> for TomlConfigLoader {
}
f.enable_encryption = !cli.disable_encryption;
f.enable_ipv6 = !cli.disable_ipv6;
f.latency_first = cli.latency_first;
if let Some(mtu) = cli.mtu {
f.mtu = mtu;
}
f.enable_exit_node = cli.enable_exit_node;
cfg.set_flags(f);
cfg.set_exit_nodes(cli.exit_nodes.clone());
cfg
}
}
fn init_logger(config: impl ConfigLoader) {
let file_config = config.get_file_logger_config();
let file_level = file_config
.level
.map(|s| s.parse().unwrap())
.unwrap_or(LevelFilter::OFF);
// logger to rolling file
let mut file_layer = None;
if file_level != LevelFilter::OFF {
let mut l = tracing_subscriber::fmt::layer();
l.set_ansi(false);
let file_filter = EnvFilter::builder()
.with_default_directive(file_level.into())
.from_env()
.unwrap();
let file_appender = tracing_appender::rolling::Builder::new()
.rotation(tracing_appender::rolling::Rotation::DAILY)
.max_log_files(5)
.filename_prefix(file_config.file.unwrap_or("easytier".to_string()))
.build(file_config.dir.unwrap_or("./".to_string()))
.expect("failed to initialize rolling file appender");
file_layer = Some(
l.with_writer(file_appender)
.with_timer(get_logger_timer_rfc3339())
.with_filter(file_filter),
);
}
// logger to console
let console_config = config.get_console_logger_config();
let console_level = console_config
.level
.map(|s| s.parse().unwrap())
.unwrap_or(LevelFilter::OFF);
let console_filter = EnvFilter::builder()
.with_default_directive(console_level.into())
.from_env()
.unwrap();
let console_layer = tracing_subscriber::fmt::layer()
.pretty()
.with_timer(get_logger_timer_rfc3339())
.with_writer(std::io::stderr)
.with_filter(console_filter);
tracing_subscriber::Registry::default()
.with(console_layer)
.with(file_layer)
.init();
}
fn print_event(msg: String) {
println!(
"{}: {}",
@@ -357,7 +451,7 @@ fn setup_panic_handler() {
pub async fn async_main(cli: Cli) {
let cfg: TomlConfigLoader = cli.into();
init_logger(&cfg);
init_logger(&cfg, false).unwrap();
let mut inst = Instance::new(cfg.clone());
let mut events = inst.get_global_ctx().subscribe();
@@ -386,6 +480,13 @@ pub async fn async_main(cli: Cli) {
));
}
GlobalCtxEvent::ListenerAddFailed(p, msg) => {
print_event(format!(
"listener add failed. listener: {}, msg: {}",
p, msg
));
}
GlobalCtxEvent::ListenerAdded(p) => {
if p.scheme() == "ring" {
continue;
@@ -415,8 +516,11 @@ pub async fn async_main(cli: Cli) {
print_event(format!("connecting to peer. dst: {}", dst));
}
GlobalCtxEvent::ConnectError(dst, err) => {
print_event(format!("connect to peer error. dst: {}, err: {}", dst, err));
GlobalCtxEvent::ConnectError(dst, ip_version, err) => {
print_event(format!(
"connect to peer error. dst: {}, ip_version: {}, err: {}",
dst, ip_version, err
));
}
GlobalCtxEvent::VpnPortalClientConnected(portal, client_addr) => {
@@ -432,12 +536,20 @@ pub async fn async_main(cli: Cli) {
portal, client_addr
));
}
GlobalCtxEvent::DhcpIpv4Changed(old, new) => {
print_event(format!("dhcp ip changed. old: {:?}, new: {:?}", old, new));
}
GlobalCtxEvent::DhcpIpv4Conflicted(ip) => {
print_event(format!("dhcp ip conflict. ip: {:?}", ip));
}
}
}
});
println!("Starting easytier with config:");
println!("############### TOML ##############\n");
println!("############### TOML ###############\n");
println!("{}", cfg.dump());
println!("-----------------------------------");

View File

@@ -63,7 +63,7 @@ pub struct IcmpProxy {
peer_manager: Arc<PeerManager>,
cidr_set: CidrSet,
socket: socket2::Socket,
socket: std::sync::Mutex<Option<socket2::Socket>>,
nat_table: IcmpNatTable,
@@ -80,8 +80,8 @@ fn socket_recv(socket: &Socket, buf: &mut [MaybeUninit<u8>]) -> Result<(usize, I
}
fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSender<ZCPacket>) {
let mut buf = [0u8; 4096];
let data: &mut [MaybeUninit<u8>] = unsafe { std::mem::transmute(&mut buf[12..]) };
let mut buf = [0u8; 2048];
let data: &mut [MaybeUninit<u8>] = unsafe { std::mem::transmute(&mut buf[..]) };
loop {
let Ok((len, peer_ip)) = socket_recv(&socket, data) else {
@@ -92,7 +92,7 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe
continue;
}
let Some(mut ipv4_packet) = MutableIpv4Packet::new(&mut buf[12..12 + len]) else {
let Some(mut ipv4_packet) = MutableIpv4Packet::new(&mut buf[..len]) else {
continue;
};
@@ -121,6 +121,10 @@ fn socket_recv_loop(socket: Socket, nat_table: IcmpNatTable, sender: UnboundedSe
};
ipv4_packet.set_destination(dest_ip);
// MacOS do not correctly set ip length when receiving from raw socket
ipv4_packet.set_total_length(len as u16);
ipv4_packet.set_checksum(ipv4::checksum(&ipv4_packet.to_immutable()));
let mut p = ZCPacket::new_with_payload(ipv4_packet.packet());
@@ -154,23 +158,11 @@ impl IcmpProxy {
peer_manager: Arc<PeerManager>,
) -> Result<Arc<Self>, Error> {
let cidr_set = CidrSet::new(global_ctx.clone());
let _g = global_ctx.net_ns.guard();
let socket = socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::RAW,
Some(socket2::Protocol::ICMPV4),
)?;
socket.bind(&socket2::SockAddr::from(SocketAddrV4::new(
std::net::Ipv4Addr::UNSPECIFIED,
0,
)))?;
let ret = Self {
global_ctx,
peer_manager,
cidr_set,
socket,
socket: std::sync::Mutex::new(None),
nat_table: Arc::new(dashmap::DashMap::new()),
tasks: Mutex::new(JoinSet::new()),
@@ -180,6 +172,18 @@ impl IcmpProxy {
}
pub async fn start(self: &Arc<Self>) -> Result<(), Error> {
let _g = self.global_ctx.net_ns.guard();
let socket = socket2::Socket::new(
socket2::Domain::IPV4,
socket2::Type::RAW,
Some(socket2::Protocol::ICMPV4),
)?;
socket.bind(&socket2::SockAddr::from(SocketAddrV4::new(
std::net::Ipv4Addr::UNSPECIFIED,
0,
)))?;
self.socket.lock().unwrap().replace(socket);
self.start_icmp_proxy().await?;
self.start_nat_table_cleaner().await?;
Ok(())
@@ -200,7 +204,7 @@ impl IcmpProxy {
}
async fn start_icmp_proxy(self: &Arc<Self>) -> Result<(), Error> {
let socket = self.socket.try_clone()?;
let socket = self.socket.lock().unwrap().as_ref().unwrap().try_clone()?;
let (sender, mut receiver) = tokio::sync::mpsc::unbounded_channel();
let nat_table = self.nat_table.clone();
thread::spawn(|| {
@@ -233,7 +237,7 @@ impl IcmpProxy {
dst_ip: Ipv4Addr,
icmp_packet: &icmp::echo_request::EchoRequestPacket,
) -> Result<(), Error> {
self.socket.send_to(
self.socket.lock().unwrap().as_ref().unwrap().send_to(
icmp_packet.packet(),
&SocketAddrV4::new(dst_ip.into(), 0).into(),
)?;
@@ -242,8 +246,13 @@ impl IcmpProxy {
}
async fn try_handle_peer_packet(&self, packet: &ZCPacket) -> Option<()> {
if self.cidr_set.is_empty() && !self.global_ctx.enable_exit_node() {
return None;
}
let _ = self.global_ctx.get_ipv4()?;
let hdr = packet.peer_manager_header().unwrap();
let is_exit_node = hdr.is_exit_node();
if hdr.packet_type != PacketType::Data as u8 {
return None;
@@ -256,7 +265,7 @@ impl IcmpProxy {
return None;
}
if !self.cidr_set.contains_v4(ipv4.get_destination()) {
if !self.cidr_set.contains_v4(ipv4.get_destination()) && !is_exit_node {
return None;
}

View File

@@ -356,8 +356,13 @@ impl TcpProxy {
}
async fn try_handle_peer_packet(&self, packet: &mut ZCPacket) -> Option<()> {
if self.cidr_set.is_empty() && !self.global_ctx.enable_exit_node() {
return None;
}
let ipv4_addr = self.global_ctx.get_ipv4()?;
let hdr = packet.peer_manager_header().unwrap();
let is_exit_node = hdr.is_exit_node();
if hdr.packet_type != PacketType::Data as u8 {
return None;
@@ -370,7 +375,7 @@ impl TcpProxy {
return None;
}
if !self.cidr_set.contains_v4(ipv4.get_destination()) {
if !self.cidr_set.contains_v4(ipv4.get_destination()) && !is_exit_node {
return None;
}

View File

@@ -227,12 +227,13 @@ pub struct UdpProxy {
impl UdpProxy {
async fn try_handle_packet(&self, packet: &ZCPacket) -> Option<()> {
if self.cidr_set.is_empty() {
if self.cidr_set.is_empty() && !self.global_ctx.enable_exit_node() {
return None;
}
let _ = self.global_ctx.get_ipv4()?;
let hdr = packet.peer_manager_header().unwrap();
let is_exit_node = hdr.is_exit_node();
if hdr.packet_type != PacketType::Data as u8 {
return None;
};
@@ -242,7 +243,7 @@ impl UdpProxy {
return None;
}
if !self.cidr_set.contains_v4(ipv4.get_destination()) {
if !self.cidr_set.contains_v4(ipv4.get_destination()) && !is_exit_node {
return None;
}

View File

@@ -1,9 +1,11 @@
use std::borrow::BorrowMut;
use std::collections::HashSet;
use std::net::Ipv4Addr;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Weak};
use anyhow::Context;
use cidr::Ipv4Inet;
use futures::{SinkExt, StreamExt};
use pnet::packet::ipv4::Ipv4Packet;
@@ -44,6 +46,8 @@ struct IpProxy {
tcp_proxy: Arc<TcpProxy>,
icmp_proxy: Arc<IcmpProxy>,
udp_proxy: Arc<UdpProxy>,
global_ctx: ArcGlobalCtx,
started: Arc<AtomicBool>,
}
impl IpProxy {
@@ -57,10 +61,19 @@ impl IpProxy {
tcp_proxy,
icmp_proxy,
udp_proxy,
global_ctx,
started: Arc::new(AtomicBool::new(false)),
})
}
async fn start(&self) -> Result<(), Error> {
if (self.global_ctx.get_proxy_cidrs().is_empty() || self.started.load(Ordering::Relaxed))
&& !self.global_ctx.config.get_flags().enable_exit_node
{
return Ok(());
}
self.started.store(true, Ordering::Relaxed);
self.tcp_proxy.start().await?;
self.icmp_proxy.start().await?;
self.udp_proxy.start().await?;
@@ -68,16 +81,216 @@ impl IpProxy {
}
}
struct NicCtx {
global_ctx: ArcGlobalCtx,
peer_mgr: Weak<PeerManager>,
peer_packet_receiver: Arc<Mutex<PacketRecvChanReceiver>>,
nic: Arc<Mutex<virtual_nic::VirtualNic>>,
tasks: JoinSet<()>,
}
impl NicCtx {
fn new(
global_ctx: ArcGlobalCtx,
peer_manager: &Arc<PeerManager>,
peer_packet_receiver: Arc<Mutex<PacketRecvChanReceiver>>,
) -> Self {
NicCtx {
global_ctx: global_ctx.clone(),
peer_mgr: Arc::downgrade(&peer_manager),
peer_packet_receiver,
nic: Arc::new(Mutex::new(virtual_nic::VirtualNic::new(global_ctx))),
tasks: JoinSet::new(),
}
}
async fn assign_ipv4_to_tun_device(&self, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
let nic = self.nic.lock().await;
nic.link_up().await?;
nic.remove_ip(None).await?;
nic.add_ip(ipv4_addr, 24).await?;
if cfg!(target_os = "macos") {
nic.add_route(ipv4_addr, 24).await?;
}
Ok(())
}
async fn do_forward_nic_to_peers_ipv4(ret: ZCPacket, mgr: &PeerManager) {
if let Some(ipv4) = Ipv4Packet::new(ret.payload()) {
if ipv4.get_version() != 4 {
tracing::info!("[USER_PACKET] not ipv4 packet: {:?}", ipv4);
return;
}
let dst_ipv4 = ipv4.get_destination();
tracing::trace!(
?ret,
"[USER_PACKET] recv new packet from tun device and forward to peers."
);
// TODO: use zero-copy
let send_ret = mgr.send_msg_ipv4(ret, dst_ipv4).await;
if send_ret.is_err() {
tracing::trace!(?send_ret, "[USER_PACKET] send_msg_ipv4 failed")
}
} else {
tracing::warn!(?ret, "[USER_PACKET] not ipv4 packet");
}
}
fn do_forward_nic_to_peers(
&mut self,
mut stream: Pin<Box<dyn ZCPacketStream>>,
) -> Result<(), Error> {
// read from nic and write to corresponding tunnel
let Some(mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer manager not available").into());
};
self.tasks.spawn(async move {
while let Some(ret) = stream.next().await {
if ret.is_err() {
log::error!("read from nic failed: {:?}", ret);
break;
}
Self::do_forward_nic_to_peers_ipv4(ret.unwrap(), mgr.as_ref()).await;
}
});
Ok(())
}
fn do_forward_peers_to_nic(&mut self, mut sink: Pin<Box<dyn ZCPacketSink>>) {
let channel = self.peer_packet_receiver.clone();
self.tasks.spawn(async move {
// unlock until coroutine finished
let mut channel = channel.lock().await;
while let Some(packet) = channel.recv().await {
tracing::trace!(
"[USER_PACKET] forward packet from peers to nic. packet: {:?}",
packet
);
let ret = sink.send(packet).await;
if ret.is_err() {
tracing::error!(?ret, "do_forward_tunnel_to_nic sink error");
}
}
});
}
async fn run_proxy_cidrs_route_updater(&mut self) -> Result<(), Error> {
let Some(peer_mgr) = self.peer_mgr.upgrade() else {
return Err(anyhow::anyhow!("peer manager not available").into());
};
let global_ctx = self.global_ctx.clone();
let net_ns = self.global_ctx.net_ns.clone();
let nic = self.nic.lock().await;
let ifcfg = nic.get_ifcfg();
let ifname = nic.ifname().to_owned();
self.tasks.spawn(async move {
let mut cur_proxy_cidrs = vec![];
loop {
let mut proxy_cidrs = vec![];
let routes = peer_mgr.list_routes().await;
for r in routes {
for cidr in r.proxy_cidrs {
let Ok(cidr) = cidr.parse::<cidr::Ipv4Cidr>() else {
continue;
};
proxy_cidrs.push(cidr);
}
}
// add vpn portal cidr to proxy_cidrs
if let Some(vpn_cfg) = global_ctx.config.get_vpn_portal_config() {
proxy_cidrs.push(vpn_cfg.client_cidr);
}
// if route is in cur_proxy_cidrs but not in proxy_cidrs, delete it.
for cidr in cur_proxy_cidrs.iter() {
if proxy_cidrs.contains(cidr) {
continue;
}
let _g = net_ns.guard();
let ret = ifcfg
.remove_ipv4_route(
ifname.as_str(),
cidr.first_address(),
cidr.network_length(),
)
.await;
if ret.is_err() {
tracing::trace!(
cidr = ?cidr,
err = ?ret,
"remove route failed.",
);
}
}
for cidr in proxy_cidrs.iter() {
if cur_proxy_cidrs.contains(cidr) {
continue;
}
let _g = net_ns.guard();
let ret = ifcfg
.add_ipv4_route(
ifname.as_str(),
cidr.first_address(),
cidr.network_length(),
)
.await;
if ret.is_err() {
tracing::trace!(
cidr = ?cidr,
err = ?ret,
"add route failed.",
);
}
}
cur_proxy_cidrs = proxy_cidrs;
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
}
});
Ok(())
}
async fn run(&mut self, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
let tunnel = {
let mut nic = self.nic.lock().await;
let ret = nic.create_dev().await?;
self.global_ctx
.issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string()));
ret
};
let (stream, sink) = tunnel.split();
self.do_forward_nic_to_peers(stream)?;
self.do_forward_peers_to_nic(sink);
self.assign_ipv4_to_tun_device(ipv4_addr).await?;
self.run_proxy_cidrs_route_updater().await?;
Ok(())
}
}
type ArcNicCtx = Arc<Mutex<Option<NicCtx>>>;
pub struct Instance {
inst_name: String,
id: uuid::Uuid,
virtual_nic: Option<Arc<virtual_nic::VirtualNic>>,
peer_packet_receiver: Option<PacketRecvChanReceiver>,
nic_ctx: ArcNicCtx,
tasks: JoinSet<()>,
peer_packet_receiver: Arc<Mutex<PacketRecvChanReceiver>>,
peer_manager: Arc<PeerManager>,
listener_manager: Arc<Mutex<ListenerManager<PeerManager>>>,
conn_manager: Arc<ManualConnectorManager>,
@@ -130,14 +343,17 @@ impl Instance {
let peer_center = Arc::new(PeerCenterInstance::new(peer_manager.clone()));
#[cfg(feature = "wireguard")]
let vpn_portal_inst = vpn_portal::wireguard::WireGuard::default();
#[cfg(not(feature = "wireguard"))]
let vpn_portal_inst = vpn_portal::NullVpnPortal;
Instance {
inst_name: global_ctx.inst_name.clone(),
id,
virtual_nic: None,
peer_packet_receiver: Some(peer_packet_receiver),
peer_packet_receiver: Arc::new(Mutex::new(peer_packet_receiver)),
nic_ctx: Arc::new(Mutex::new(None)),
tasks: JoinSet::new(),
peer_manager,
@@ -160,78 +376,6 @@ impl Instance {
self.conn_manager.clone()
}
async fn do_forward_nic_to_peers_ipv4(ret: ZCPacket, mgr: &PeerManager) {
if let Some(ipv4) = Ipv4Packet::new(ret.payload()) {
if ipv4.get_version() != 4 {
tracing::info!("[USER_PACKET] not ipv4 packet: {:?}", ipv4);
return;
}
let dst_ipv4 = ipv4.get_destination();
tracing::trace!(
?ret,
"[USER_PACKET] recv new packet from tun device and forward to peers."
);
// TODO: use zero-copy
let send_ret = mgr.send_msg_ipv4(ret, dst_ipv4).await;
if send_ret.is_err() {
tracing::trace!(?send_ret, "[USER_PACKET] send_msg_ipv4 failed")
}
} else {
tracing::warn!(?ret, "[USER_PACKET] not ipv4 packet");
}
}
// async fn do_forward_nic_to_peers_ethernet(mut ret: BytesMut, mgr: &PeerManager) {
// if let Some(eth) = EthernetPacket::new(&ret) {
// log::warn!("begin to forward: {:?}, type: {}", eth, eth.get_ethertype());
// Self::do_forward_nic_to_peers_ipv4(ret.split_off(14), mgr).await;
// } else {
// log::warn!("not ipv4 packet: {:?}", ret);
// }
// }
fn do_forward_nic_to_peers(
&mut self,
mut stream: Pin<Box<dyn ZCPacketStream>>,
) -> Result<(), Error> {
// read from nic and write to corresponding tunnel
let mgr = self.peer_manager.clone();
self.tasks.spawn(async move {
while let Some(ret) = stream.next().await {
if ret.is_err() {
log::error!("read from nic failed: {:?}", ret);
break;
}
Self::do_forward_nic_to_peers_ipv4(ret.unwrap(), mgr.as_ref()).await;
// Self::do_forward_nic_to_peers_ethernet(ret.into(), mgr.as_ref()).await;
}
});
Ok(())
}
fn do_forward_peers_to_nic(
tasks: &mut JoinSet<()>,
mut sink: Pin<Box<dyn ZCPacketSink>>,
channel: Option<PacketRecvChanReceiver>,
) {
tasks.spawn(async move {
let mut channel = channel.unwrap();
while let Some(packet) = channel.recv().await {
tracing::trace!(
"[USER_PACKET] forward packet from peers to nic. packet: {:?}",
packet
);
let ret = sink.send(packet).await;
if ret.is_err() {
tracing::error!(?ret, "do_forward_tunnel_to_nic sink error");
}
}
});
}
async fn add_initial_peers(&mut self) -> Result<(), Error> {
for peer in self.global_ctx.config.get_peers().iter() {
self.get_conn_manager()
@@ -241,43 +385,115 @@ impl Instance {
Ok(())
}
async fn prepare_tun_device(&mut self) -> Result<(), Error> {
let mut nic = virtual_nic::VirtualNic::new(self.get_global_ctx());
let tunnel = nic.create_dev().await?;
self.global_ctx
.issue_event(GlobalCtxEvent::TunDeviceReady(nic.ifname().to_string()));
let (stream, sink) = tunnel.split();
self.virtual_nic = Some(Arc::new(nic));
self.do_forward_nic_to_peers(stream).unwrap();
Self::do_forward_peers_to_nic(
self.tasks.borrow_mut(),
sink,
self.peer_packet_receiver.take(),
);
Ok(())
async fn clear_nic_ctx(arc_nic_ctx: ArcNicCtx) {
let _ = arc_nic_ctx.lock().await.take();
}
async fn assign_ipv4_to_tun_device(&mut self, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
let nic = self.virtual_nic.as_ref().unwrap().clone();
nic.link_up().await?;
nic.remove_ip(None).await?;
nic.add_ip(ipv4_addr, 24).await?;
if cfg!(target_os = "macos") {
nic.add_route(ipv4_addr, 24).await?;
}
Ok(())
async fn use_new_nic_ctx(arc_nic_ctx: ArcNicCtx, nic_ctx: NicCtx) {
let mut g = arc_nic_ctx.lock().await;
*g = Some(nic_ctx);
}
// Warning, if there is an IP conflict in the network when using DHCP, the IP will be automatically changed.
/// Background pseudo-DHCP loop: picks a free host address in the first
/// observed /24, brings up a NIC context with it, and re-assigns (or drops
/// the address) when a peer later claims the same one.
fn check_dhcp_ip_conflict(&self) {
    use rand::Rng;
    // Handles moved into the spawned task below.
    let peer_manager_c = self.peer_manager.clone();
    let global_ctx_c = self.get_global_ctx();
    let nic_ctx = self.nic_ctx.clone();
    let peer_packet_receiver = self.peer_packet_receiver.clone();
    tokio::spawn(async move {
        // Fallback network base used when a route has no parseable ipv4.
        let default_ipv4_addr = Ipv4Addr::new(10, 0, 0, 0);
        // Address currently held by this node via pseudo-DHCP, if any.
        let mut dhcp_ip: Option<Ipv4Inet> = None;
        // Sampling rounds per loop iteration: 6 while searching for an
        // address, reduced to 1 once one is held.
        let mut tries = 6;
        loop {
            let mut ipv4_addr: Option<Ipv4Inet> = None;
            let mut unique_ipv4 = HashSet::new();
            for i in 0..tries {
                if dhcp_ip.is_none() {
                    // Not holding an address yet: give routes time to arrive.
                    tokio::time::sleep(std::time::Duration::from_millis(500)).await;
                }
                // Record every /24 slot already claimed by a peer route.
                for route in peer_manager_c.list_routes().await {
                    if !route.ipv4_addr.is_empty() {
                        if let Ok(ip) = Ipv4Inet::new(
                            if let Ok(ipv4) = route.ipv4_addr.parse::<Ipv4Addr>() {
                                ipv4
                            } else {
                                default_ipv4_addr
                            },
                            24,
                        ) {
                            unique_ipv4.insert(ip);
                        }
                    }
                }
                // No peer addresses observed at all: seed with the default
                // network so a candidate can still be picked below.
                if i == tries - 1 && unique_ipv4.is_empty() {
                    unique_ipv4.insert(Ipv4Inet::new(default_ipv4_addr, 24).unwrap());
                }
                // If no peer claims our current address, keep it and stop
                // sampling early.
                if let Some(ip) = dhcp_ip {
                    if !unique_ipv4.contains(&ip) {
                        ipv4_addr = dhcp_ip;
                        break;
                    }
                }
                // Pick the first unused host address in the first observed
                // /24, skipping the network and broadcast addresses.
                for net in unique_ipv4.iter().map(|inet| inet.network()).take(1) {
                    if let Some(ip) = net.iter().find(|ip| {
                        ip.address() != net.first_address()
                            && ip.address() != net.last_address()
                            && !unique_ipv4.contains(ip)
                    }) {
                        ipv4_addr = Some(ip);
                    }
                }
            }
            // Address changed (acquired, moved, or lost): tear down the old
            // NIC context and, if we have a new address, build a fresh one.
            if dhcp_ip != ipv4_addr {
                let last_ip = dhcp_ip.map(|p| p.address());
                tracing::debug!("last_ip: {:?}", last_ip);
                Self::clear_nic_ctx(nic_ctx.clone()).await;
                if let Some(ip) = ipv4_addr {
                    let mut new_nic_ctx = NicCtx::new(
                        global_ctx_c.clone(),
                        &peer_manager_c,
                        peer_packet_receiver.clone(),
                    );
                    dhcp_ip = Some(ip);
                    tries = 1;
                    if let Err(e) = new_nic_ctx.run(ip.address()).await {
                        tracing::error!("add ip failed: {:?}", e);
                        global_ctx_c.set_ipv4(None);
                        // Randomized backoff before retrying, to avoid
                        // lock-step retries across nodes.
                        let sleep: u64 = rand::thread_rng().gen_range(200..500);
                        tokio::time::sleep(std::time::Duration::from_millis(sleep)).await;
                        continue;
                    }
                    global_ctx_c.set_ipv4(Some(ip.address()));
                    global_ctx_c.issue_event(GlobalCtxEvent::DhcpIpv4Changed(
                        last_ip,
                        Some(ip.address()),
                    ));
                    Self::use_new_nic_ctx(nic_ctx.clone(), new_nic_ctx).await;
                } else {
                    // Conflict with no free replacement: drop the address and
                    // restart the full search.
                    global_ctx_c.set_ipv4(None);
                    global_ctx_c.issue_event(GlobalCtxEvent::DhcpIpv4Conflicted(last_ip));
                    dhcp_ip = None;
                    tries = 6;
                }
            }
            // Periodic re-check with jitter.
            let sleep: u64 = rand::thread_rng().gen_range(5..10);
            tokio::time::sleep(std::time::Duration::from_secs(sleep)).await;
        }
    });
}
pub async fn run(&mut self) -> Result<(), Error> {
self.prepare_tun_device().await?;
if let Some(ipv4_addr) = self.global_ctx.get_ipv4() {
self.assign_ipv4_to_tun_device(ipv4_addr).await?;
}
self.listener_manager
.lock()
.await
@@ -286,19 +502,35 @@ impl Instance {
self.listener_manager.lock().await.run().await?;
self.peer_manager.run().await?;
if self.global_ctx.config.get_dhcp() {
self.check_dhcp_ip_conflict();
} else if let Some(ipv4_addr) = self.global_ctx.get_ipv4() {
let mut new_nic_ctx = NicCtx::new(
self.global_ctx.clone(),
&self.peer_manager,
self.peer_packet_receiver.clone(),
);
new_nic_ctx.run(ipv4_addr).await?;
Self::use_new_nic_ctx(self.nic_ctx.clone(), new_nic_ctx).await;
}
self.run_rpc_server()?;
// run after tun device created, so listener can bind to tun device, which may be required by win 10
self.ip_proxy = Some(IpProxy::new(
self.get_global_ctx(),
self.get_peer_manager(),
)?);
self.ip_proxy.as_ref().unwrap().start().await?;
self.run_proxy_cidrs_route_updater();
self.run_ip_proxy().await?;
self.udp_hole_puncher.lock().await.run().await?;
self.peer_center.init().await;
let route_calc = self.peer_center.get_cost_calculator();
self.peer_manager
.get_route()
.set_route_cost_fn(route_calc)
.await;
self.add_initial_peers().await?;
@@ -309,6 +541,14 @@ impl Instance {
Ok(())
}
/// Starts the IP proxy. Fails if the proxy was never constructed.
pub async fn run_ip_proxy(&mut self) -> Result<(), Error> {
    let Some(proxy) = self.ip_proxy.as_ref() else {
        return Err(anyhow::anyhow!("ip proxy not enabled.").into());
    };
    proxy.start().await?;
    Ok(())
}
pub async fn run_vpn_portal(&mut self) -> Result<(), Error> {
if self.global_ctx.get_vpn_portal_cidr().is_none() {
return Err(anyhow::anyhow!("vpn portal cidr not set.").into());
@@ -433,84 +673,6 @@ impl Instance {
Ok(())
}
/// Spawns a background task that keeps the OS routing table in sync with
/// the proxy CIDRs advertised by peers (plus the local VPN portal client
/// CIDR). Reconciles once per second, forever.
fn run_proxy_cidrs_route_updater(&mut self) {
    let peer_mgr = self.peer_manager.clone();
    let global_ctx = self.global_ctx.clone();
    let net_ns = self.global_ctx.net_ns.clone();
    // Panics if called before the virtual NIC has been created.
    let nic = self.virtual_nic.as_ref().unwrap().clone();
    let ifcfg = nic.get_ifcfg();
    let ifname = nic.ifname().to_owned();
    self.tasks.spawn(async move {
        // Routes we installed in the previous iteration.
        let mut cur_proxy_cidrs = vec![];
        loop {
            // Gather the desired set of proxied CIDRs from all peer routes;
            // unparseable entries are skipped silently.
            let mut proxy_cidrs = vec![];
            let routes = peer_mgr.list_routes().await;
            for r in routes {
                for cidr in r.proxy_cidrs {
                    let Ok(cidr) = cidr.parse::<cidr::Ipv4Cidr>() else {
                        continue;
                    };
                    proxy_cidrs.push(cidr);
                }
            }
            // add vpn portal cidr to proxy_cidrs
            if let Some(vpn_cfg) = global_ctx.config.get_vpn_portal_config() {
                proxy_cidrs.push(vpn_cfg.client_cidr);
            }
            // if route is in cur_proxy_cidrs but not in proxy_cidrs, delete it.
            for cidr in cur_proxy_cidrs.iter() {
                if proxy_cidrs.contains(cidr) {
                    continue;
                }
                // Enter the instance's network namespace for the ifcfg call.
                let _g = net_ns.guard();
                let ret = ifcfg
                    .remove_ipv4_route(
                        ifname.as_str(),
                        cidr.first_address(),
                        cidr.network_length(),
                    )
                    .await;
                if ret.is_err() {
                    // Best effort: the route may already be gone.
                    tracing::trace!(
                        cidr = ?cidr,
                        err = ?ret,
                        "remove route failed.",
                    );
                }
            }
            // Install routes that are newly desired.
            for cidr in proxy_cidrs.iter() {
                if cur_proxy_cidrs.contains(cidr) {
                    continue;
                }
                let _g = net_ns.guard();
                let ret = ifcfg
                    .add_ipv4_route(
                        ifname.as_str(),
                        cidr.first_address(),
                        cidr.network_length(),
                    )
                    .await;
                if ret.is_err() {
                    tracing::trace!(
                        cidr = ?cidr,
                        err = ?ret,
                        "add route failed.",
                    );
                }
            }
            cur_proxy_cidrs = proxy_cidrs;
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
    });
}
/// Returns a cloned handle to this instance's shared global context.
pub fn get_global_ctx(&self) -> ArcGlobalCtx {
    self.global_ctx.clone()
}

View File

@@ -4,6 +4,10 @@ use anyhow::Context;
use async_trait::async_trait;
use tokio::{sync::Mutex, task::JoinSet};
#[cfg(feature = "quic")]
use crate::tunnel::quic::QUICTunnelListener;
#[cfg(feature = "wireguard")]
use crate::tunnel::wireguard::{WgConfig, WgTunnelListener};
use crate::{
common::{
error::Error,
@@ -12,33 +16,36 @@ use crate::{
},
peers::peer_manager::PeerManager,
tunnel::{
quic::QUICTunnelListener,
ring::RingTunnelListener,
tcp::TcpTunnelListener,
udp::UdpTunnelListener,
wireguard::{WgConfig, WgTunnelListener},
Tunnel, TunnelListener,
ring::RingTunnelListener, tcp::TcpTunnelListener, udp::UdpTunnelListener, Tunnel,
TunnelListener,
},
};
pub fn get_listener_by_url(
l: &url::Url,
ctx: ArcGlobalCtx,
_ctx: ArcGlobalCtx,
) -> Result<Box<dyn TunnelListener>, Error> {
Ok(match l.scheme() {
"tcp" => Box::new(TcpTunnelListener::new(l.clone())),
"udp" => Box::new(UdpTunnelListener::new(l.clone())),
#[cfg(feature = "wireguard")]
"wg" => {
let nid = ctx.get_network_identity();
let nid = _ctx.get_network_identity();
let wg_config = WgConfig::new_from_network_identity(
&nid.network_name,
&nid.network_secret.unwrap_or_default(),
);
Box::new(WgTunnelListener::new(l.clone(), wg_config))
}
#[cfg(feature = "quic")]
"quic" => Box::new(QUICTunnelListener::new(l.clone())),
#[cfg(feature = "websocket")]
"ws" | "wss" => {
use crate::tunnel::websocket::WSTunnelListener;
Box::new(WSTunnelListener::new(l.clone()))
}
_ => {
unreachable!("unsupported listener uri");
return Err(Error::InvalidUrl(l.to_string()));
}
})
}
@@ -94,7 +101,12 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
.await?;
for l in self.global_ctx.config.get_listener_uris().iter() {
let lis = get_listener_by_url(l, self.global_ctx.clone())?;
let Ok(lis) = get_listener_by_url(l, self.global_ctx.clone()) else {
let msg = format!("failed to get listener by url: {}, maybe not supported", l);
self.global_ctx
.issue_event(GlobalCtxEvent::ListenerAddFailed(l.clone(), msg));
continue;
};
self.add_listener(lis, true).await?;
}
@@ -152,6 +164,7 @@ impl<H: TunnelHandlerForListener + Send + Sync + 'static + Debug> ListenerManage
}
});
}
tracing::warn!("listener exit");
}
pub async fn run(&mut self) -> Result<(), Error> {

View File

@@ -58,7 +58,7 @@ impl Stream for TunStream {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<StreamItem>> {
let mut self_mut = self.project();
let mut g = ready!(self_mut.l.poll_lock(cx));
reserve_buf(&mut self_mut.cur_buf, 2500, 128 * 1024);
reserve_buf(&mut self_mut.cur_buf, 2500, 32 * 1024);
if self_mut.cur_buf.len() == 0 {
unsafe {
self_mut.cur_buf.set_len(*self_mut.payload_offset);
@@ -260,9 +260,8 @@ impl VirtualNic {
Ok(self)
}
async fn create_dev_ret_err(&mut self) -> Result<Box<dyn Tunnel>, Error> {
async fn create_tun(&mut self) -> Result<AsyncDevice, Error> {
let mut config = Configuration::default();
let has_packet_info = cfg!(target_os = "macos");
config.layer(Layer::L3);
#[cfg(target_os = "linux")]
@@ -275,11 +274,26 @@ impl VirtualNic {
#[cfg(target_os = "windows")]
{
use rand::distributions::Distribution as _;
use std::net::IpAddr;
let c = crate::arch::windows::interface_count()?;
config.name(format!("et{}_{}", self.dev_name, c));
let mut rng = rand::thread_rng();
let s: String = rand::distributions::Alphanumeric
.sample_iter(&mut rng)
.take(4)
.map(char::from)
.collect::<String>()
.to_lowercase();
config.name(format!("et{}_{}_{}", self.dev_name, c, s));
// set a temporary address
config.address(format!("172.0.{}.3", c).parse::<IpAddr>().unwrap());
config.platform(|config| {
config.skip_config(true);
config.guid(None);
config.ring_cap(Some(config.min_ring_cap() * 2));
});
}
if self.queue_num != 1 {
@@ -288,16 +302,30 @@ impl VirtualNic {
config.queues(self.queue_num);
config.up();
let dev = {
let _g = self.global_ctx.net_ns.guard();
create_as_async(&config)?
};
let _g = self.global_ctx.net_ns.guard();
Ok(create_as_async(&config)?)
}
async fn create_dev_ret_err(&mut self) -> Result<Box<dyn Tunnel>, Error> {
let dev = self.create_tun().await?;
let ifname = dev.get_ref().name()?;
self.ifcfg.wait_interface_show(ifname.as_str()).await?;
let (a, b) = BiLock::new(dev);
let flags = self.global_ctx.config.get_flags();
let mut mtu_in_config = flags.mtu;
if flags.enable_encryption {
mtu_in_config -= 20;
}
{
// set mtu by ourselves, rust-tun does not handle it correctly on windows
let _g = self.global_ctx.net_ns.guard();
self.ifcfg
.set_mtu(ifname.as_str(), mtu_in_config as u32)
.await?;
}
let has_packet_info = cfg!(target_os = "macos");
let (a, b) = BiLock::new(dev);
let ft = TunnelWrapper::new(
TunStream::new(a, has_packet_info),
FramedWriter::new_with_converter(

View File

@@ -3,8 +3,7 @@ use std::{
sync::{atomic::AtomicBool, Arc, RwLock},
};
use chrono::{DateTime, Local};
use easytier::{
use crate::{
common::{
config::{ConfigLoader, TomlConfigLoader},
global_ctx::GlobalCtxEvent,
@@ -16,7 +15,9 @@ use easytier::{
cli::{PeerInfo, Route, StunInfo},
peer::GetIpListResponse,
},
utils::{list_peer_route_pair, PeerRoutePair},
};
use chrono::{DateTime, Local};
use serde::{Deserialize, Serialize};
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
@@ -212,3 +213,68 @@ impl Drop for EasyTierLauncher {
}
}
}
/// Serializable snapshot of a running network instance's state, built by
/// `NetworkInstance::get_running_info`.
#[derive(Deserialize, Serialize, Debug)]
pub struct NetworkInstanceRunningInfo {
    pub my_node_info: MyNodeInfo,
    // Event log entries paired with the local time each was observed.
    pub events: Vec<(DateTime<Local>, GlobalCtxEvent)>,
    // NOTE(review): duplicates `my_node_info` (both populated from
    // launcher.get_node_info()) — presumably kept for backward compatibility
    // of the serialized format; confirm before removing.
    pub node_info: MyNodeInfo,
    pub routes: Vec<Route>,
    pub peers: Vec<PeerInfo>,
    pub peer_route_pairs: Vec<PeerRoutePair>,
    pub running: bool,
    pub error_msg: Option<String>,
}
/// A single EasyTier network: its configuration plus the launcher that
/// runs it. `launcher` is `None` until `start()` is called.
pub struct NetworkInstance {
    config: TomlConfigLoader,
    launcher: Option<EasyTierLauncher>,
}
impl NetworkInstance {
    /// Create a new, not-yet-started network instance from a config loader.
    pub fn new(config: TomlConfigLoader) -> Self {
        Self {
            config,
            launcher: None,
        }
    }

    /// Whether the launcher exists and reports itself as running.
    pub fn is_easytier_running(&self) -> bool {
        // map_or replaces the is_some() && unwrap() pair.
        self.launcher.as_ref().map_or(false, |l| l.running())
    }

    /// Snapshot of the instance's runtime state, or `None` when it was
    /// never started.
    pub fn get_running_info(&self) -> Option<NetworkInstanceRunningInfo> {
        // `?` replaces the is_none() check + unwrap().
        let launcher = self.launcher.as_ref()?;

        let peers = launcher.get_peers();
        let routes = launcher.get_routes();
        let peer_route_pairs = list_peer_route_pair(peers.clone(), routes.clone());

        Some(NetworkInstanceRunningInfo {
            my_node_info: launcher.get_node_info(),
            events: launcher.get_events(),
            // NOTE(review): duplicates my_node_info; presumably kept for
            // serialized-format compatibility — confirm.
            node_info: launcher.get_node_info(),
            routes,
            peers,
            peer_route_pairs,
            running: launcher.running(),
            error_msg: launcher.error_msg(),
        })
    }

    /// Start the instance if it is not already running. Idempotent.
    pub fn start(&mut self) -> Result<(), anyhow::Error> {
        if self.is_easytier_running() {
            return Ok(());
        }

        let mut launcher = EasyTierLauncher::new();
        // The launcher obtains its config lazily through this factory.
        launcher.start(|| Ok(self.config.clone()));
        self.launcher = Some(launcher);

        Ok(())
    }
}

View File

@@ -1,13 +1,15 @@
#![allow(dead_code)]
pub mod arch;
mod arch;
mod connector;
mod gateway;
mod instance;
mod peer_center;
mod peers;
mod vpn_portal;
pub mod common;
pub mod connector;
pub mod gateway;
pub mod instance;
pub mod peer_center;
pub mod peers;
pub mod launcher;
pub mod rpc;
pub mod tunnel;
pub mod utils;
pub mod vpn_portal;

View File

@@ -1,24 +1,23 @@
use std::{
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, SystemTime},
collections::BTreeSet,
sync::Arc,
time::{Duration, Instant, SystemTime},
};
use crossbeam::atomic::AtomicCell;
use futures::Future;
use tokio::{
sync::{Mutex, RwLock},
task::JoinSet,
};
use std::sync::RwLock;
use tokio::sync::Mutex;
use tokio::task::JoinSet;
use tracing::Instrument;
use crate::{
common::PeerId,
peers::{peer_manager::PeerManager, rpc_service::PeerManagerRpcService},
peers::{
peer_manager::PeerManager,
route_trait::{RouteCostCalculator, RouteCostCalculatorInterface},
rpc_service::PeerManagerRpcService,
},
rpc::{GetGlobalPeerMapRequest, GetGlobalPeerMapResponse},
};
@@ -34,7 +33,8 @@ struct PeerCenterBase {
lock: Arc<Mutex<()>>,
}
static SERVICE_ID: u32 = 5;
// static SERVICE_ID: u32 = 5; for compatibility with the original code
static SERVICE_ID: u32 = 50;
struct PeridicJobCtx<T> {
peer_mgr: Arc<PeerManager>,
@@ -132,7 +132,7 @@ impl PeerCenterBase {
pub struct PeerCenterInstanceService {
global_peer_map: Arc<RwLock<GlobalPeerMap>>,
global_peer_map_digest: Arc<RwLock<Digest>>,
global_peer_map_digest: Arc<AtomicCell<Digest>>,
}
#[tonic::async_trait]
@@ -141,7 +141,7 @@ impl crate::rpc::cli::peer_center_rpc_server::PeerCenterRpc for PeerCenterInstan
&self,
_request: tonic::Request<GetGlobalPeerMapRequest>,
) -> Result<tonic::Response<GetGlobalPeerMapResponse>, tonic::Status> {
let global_peer_map = self.global_peer_map.read().await.clone();
let global_peer_map = self.global_peer_map.read().unwrap().clone();
Ok(tonic::Response::new(GetGlobalPeerMapResponse {
global_peer_map: global_peer_map
.map
@@ -157,7 +157,8 @@ pub struct PeerCenterInstance {
client: Arc<PeerCenterBase>,
global_peer_map: Arc<RwLock<GlobalPeerMap>>,
global_peer_map_digest: Arc<RwLock<Digest>>,
global_peer_map_digest: Arc<AtomicCell<Digest>>,
global_peer_map_update_time: Arc<AtomicCell<Instant>>,
}
impl PeerCenterInstance {
@@ -166,7 +167,8 @@ impl PeerCenterInstance {
peer_mgr: peer_mgr.clone(),
client: Arc::new(PeerCenterBase::new(peer_mgr.clone())),
global_peer_map: Arc::new(RwLock::new(GlobalPeerMap::new())),
global_peer_map_digest: Arc::new(RwLock::new(Digest::default())),
global_peer_map_digest: Arc::new(AtomicCell::new(Digest::default())),
global_peer_map_update_time: Arc::new(AtomicCell::new(Instant::now())),
}
}
@@ -179,12 +181,14 @@ impl PeerCenterInstance {
async fn init_get_global_info_job(&self) {
struct Ctx {
global_peer_map: Arc<RwLock<GlobalPeerMap>>,
global_peer_map_digest: Arc<RwLock<Digest>>,
global_peer_map_digest: Arc<AtomicCell<Digest>>,
global_peer_map_update_time: Arc<AtomicCell<Instant>>,
}
let ctx = Arc::new(Ctx {
global_peer_map: self.global_peer_map.clone(),
global_peer_map_digest: self.global_peer_map_digest.clone(),
global_peer_map_update_time: self.global_peer_map_update_time.clone(),
});
self.client
@@ -192,11 +196,19 @@ impl PeerCenterInstance {
let mut rpc_ctx = tarpc::context::current();
rpc_ctx.deadline = SystemTime::now() + Duration::from_secs(3);
if ctx
.job_ctx
.global_peer_map_update_time
.load()
.elapsed()
.as_secs()
> 60
{
ctx.job_ctx.global_peer_map_digest.store(Digest::default());
}
let ret = client
.get_global_peer_map(
rpc_ctx,
ctx.job_ctx.global_peer_map_digest.read().await.clone(),
)
.get_global_peer_map(rpc_ctx, ctx.job_ctx.global_peer_map_digest.load())
.await?;
let Ok(resp) = ret else {
@@ -217,10 +229,13 @@ impl PeerCenterInstance {
resp.digest
);
*ctx.job_ctx.global_peer_map.write().await = resp.global_peer_map;
*ctx.job_ctx.global_peer_map_digest.write().await = resp.digest;
*ctx.job_ctx.global_peer_map.write().unwrap() = resp.global_peer_map;
ctx.job_ctx.global_peer_map_digest.store(resp.digest);
ctx.job_ctx
.global_peer_map_update_time
.store(Instant::now());
Ok(10000)
Ok(5000)
})
.await;
}
@@ -228,67 +243,53 @@ impl PeerCenterInstance {
async fn init_report_peers_job(&self) {
struct Ctx {
service: PeerManagerRpcService,
need_send_peers: AtomicBool,
last_report_peers: Mutex<PeerInfoForGlobalMap>,
last_report_peers: Mutex<BTreeSet<PeerId>>,
last_center_peer: AtomicCell<PeerId>,
last_report_time: AtomicCell<Instant>,
}
let ctx = Arc::new(Ctx {
service: PeerManagerRpcService::new(self.peer_mgr.clone()),
need_send_peers: AtomicBool::new(true),
last_report_peers: Mutex::new(PeerInfoForGlobalMap::default()),
last_report_peers: Mutex::new(BTreeSet::new()),
last_center_peer: AtomicCell::new(PeerId::default()),
last_report_time: AtomicCell::new(Instant::now()),
});
self.client
.init_periodic_job(ctx, |client, ctx| async move {
let my_node_id = ctx.peer_mgr.my_peer_id();
let peers: PeerInfoForGlobalMap = ctx.job_ctx.service.list_peers().await.into();
let peer_list = peers.direct_peers.keys().map(|k| *k).collect();
let job_ctx = &ctx.job_ctx;
// if peers are not same in next 10 seconds, report peers to center server
let mut peers = PeerInfoForGlobalMap::default();
for _ in 1..10 {
peers = ctx.job_ctx.service.list_peers().await.into();
if ctx.center_peer.load() != ctx.job_ctx.last_center_peer.load() {
// if center peer changed, report peers immediately
break;
}
if peers == *ctx.job_ctx.last_report_peers.lock().await {
return Ok(3000);
}
tokio::time::sleep(Duration::from_secs(2)).await;
// only report when:
// 1. center peer changed
// 2. last report time is more than 60 seconds
// 3. peers changed
if ctx.center_peer.load() == ctx.job_ctx.last_center_peer.load()
&& job_ctx.last_report_time.load().elapsed().as_secs() < 60
&& *job_ctx.last_report_peers.lock().await == peer_list
{
return Ok(5000);
}
*ctx.job_ctx.last_report_peers.lock().await = peers.clone();
let mut hasher = DefaultHasher::new();
peers.hash(&mut hasher);
let peers = if ctx.job_ctx.need_send_peers.load(Ordering::Relaxed) {
Some(peers)
} else {
None
};
let mut rpc_ctx = tarpc::context::current();
rpc_ctx.deadline = SystemTime::now() + Duration::from_secs(3);
let ret = client
.report_peers(
rpc_ctx,
my_node_id.clone(),
peers,
hasher.finish() as Digest,
)
.report_peers(rpc_ctx, my_node_id.clone(), peers)
.await?;
if matches!(ret.as_ref().err(), Some(Error::DigestMismatch)) {
ctx.job_ctx.need_send_peers.store(true, Ordering::Relaxed);
return Ok(0);
} else if ret.is_err() {
if ret.is_ok() {
ctx.job_ctx.last_center_peer.store(ctx.center_peer.load());
*ctx.job_ctx.last_report_peers.lock().await = peer_list;
ctx.job_ctx.last_report_time.store(Instant::now());
} else {
tracing::error!("report peers to center server got error result: {:?}", ret);
return Ok(500);
}
ctx.job_ctx.last_center_peer.store(ctx.center_peer.load());
ctx.job_ctx.need_send_peers.store(false, Ordering::Relaxed);
Ok(3000)
Ok(5000)
})
.await;
}
@@ -299,15 +300,60 @@ impl PeerCenterInstance {
global_peer_map_digest: self.global_peer_map_digest.clone(),
}
}
/// Builds a route-cost calculator backed by the shared global peer map.
///
/// The calculator copies the map into a private snapshot in
/// `begin_update()`, so `calculate_cost()` never takes the lock on the
/// hot path; `need_update()` compares the snapshot's timestamp against
/// the shared map's last update time.
pub fn get_cost_calculator(&self) -> RouteCostCalculator {
    struct RouteCostCalculatorImpl {
        global_peer_map: Arc<RwLock<GlobalPeerMap>>,
        // Lock-free snapshot refreshed by begin_update().
        global_peer_map_clone: GlobalPeerMap,
        last_update_time: AtomicCell<Instant>,
        global_peer_map_update_time: Arc<AtomicCell<Instant>>,
    }

    impl RouteCostCalculatorInterface for RouteCostCalculatorImpl {
        fn calculate_cost(&self, src: PeerId, dst: PeerId) -> i32 {
            // Use the reported direct latency when known; 80 is the
            // fallback cost for pairs without a reported direct link.
            self.global_peer_map_clone
                .map
                .get(&src)
                .and_then(|src_peer_info| src_peer_info.direct_peers.get(&dst))
                .map(|info| info.latency_ms) // was and_then(|i| Some(..)) — clippy bind_instead_of_map
                .unwrap_or(80)
        }

        fn begin_update(&mut self) {
            // Copy under the read lock so later cost queries are lock-free.
            let global_peer_map = self.global_peer_map.read().unwrap();
            self.global_peer_map_clone = global_peer_map.clone();
        }

        fn end_update(&mut self) {
            self.last_update_time
                .store(self.global_peer_map_update_time.load());
        }

        fn need_update(&self) -> bool {
            self.last_update_time.load() < self.global_peer_map_update_time.load()
        }
    }

    Box::new(RouteCostCalculatorImpl {
        global_peer_map: self.global_peer_map.clone(),
        global_peer_map_clone: GlobalPeerMap::new(),
        // Start one second in the past so the first need_update() is true.
        last_update_time: AtomicCell::new(
            self.global_peer_map_update_time.load() - Duration::from_secs(1),
        ),
        global_peer_map_update_time: self.global_peer_map_update_time.clone(),
    })
}
}
#[cfg(test)]
mod tests {
use std::ops::Deref;
use crate::{
peer_center::server::get_global_data,
peers::tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear},
tunnel::common::tests::wait_for_condition,
};
use super::*;
@@ -340,43 +386,64 @@ mod tests {
let center_data = get_global_data(center_peer);
// wait center_data has 3 records for 10 seconds
let now = std::time::Instant::now();
loop {
if center_data.read().await.global_peer_map.map.len() == 3 {
println!(
"center data ready, {:#?}",
center_data.read().await.global_peer_map
);
break;
}
if now.elapsed().as_secs() > 60 {
panic!("center data not ready");
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
wait_for_condition(
|| async {
if center_data.global_peer_map.len() == 4 {
println!("center data {:#?}", center_data.global_peer_map);
true
} else {
false
}
},
Duration::from_secs(10),
)
.await;
let mut digest = None;
for pc in peer_centers.iter() {
let rpc_service = pc.get_rpc_service();
let now = std::time::Instant::now();
while now.elapsed().as_secs() < 10 {
if rpc_service.global_peer_map.read().await.map.len() == 3 {
break;
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
assert_eq!(rpc_service.global_peer_map.read().await.map.len(), 3);
wait_for_condition(
|| async { rpc_service.global_peer_map.read().unwrap().map.len() == 3 },
Duration::from_secs(10),
)
.await;
println!("rpc service ready, {:#?}", rpc_service.global_peer_map);
if digest.is_none() {
digest = Some(rpc_service.global_peer_map_digest.read().await.clone());
digest = Some(rpc_service.global_peer_map_digest.load());
} else {
let v = rpc_service.global_peer_map_digest.read().await;
assert_eq!(digest.as_ref().unwrap(), v.deref());
let v = rpc_service.global_peer_map_digest.load();
assert_eq!(digest.unwrap(), v);
}
let mut route_cost = pc.get_cost_calculator();
assert!(route_cost.need_update());
route_cost.begin_update();
assert!(
route_cost.calculate_cost(peer_mgr_a.my_peer_id(), peer_mgr_b.my_peer_id()) < 30
);
assert!(
route_cost.calculate_cost(peer_mgr_b.my_peer_id(), peer_mgr_a.my_peer_id()) < 30
);
assert!(
route_cost.calculate_cost(peer_mgr_b.my_peer_id(), peer_mgr_c.my_peer_id()) < 30
);
assert!(
route_cost.calculate_cost(peer_mgr_c.my_peer_id(), peer_mgr_b.my_peer_id()) < 30
);
assert!(
route_cost.calculate_cost(peer_mgr_c.my_peer_id(), peer_mgr_a.my_peer_id()) > 50
);
assert!(
route_cost.calculate_cost(peer_mgr_a.my_peer_id(), peer_mgr_c.my_peer_id()) > 50
);
route_cost.end_update();
assert!(!route_cost.need_update());
}
let global_digest = get_global_data(center_peer).read().await.digest.clone();
let global_digest = get_global_data(center_peer).digest.load();
assert_eq!(digest.as_ref().unwrap(), &global_digest);
}
}

View File

@@ -1,45 +1,48 @@
use std::{
collections::BinaryHeap,
hash::{Hash, Hasher},
sync::Arc,
};
use crossbeam::atomic::AtomicCell;
use dashmap::DashMap;
use once_cell::sync::Lazy;
use tokio::{sync::RwLock, task::JoinSet};
use tokio::{task::JoinSet};
use crate::common::PeerId;
use crate::{common::PeerId, rpc::DirectConnectedPeerInfo};
use super::{
service::{GetGlobalPeerMapResponse, GlobalPeerMap, PeerCenterService, PeerInfoForGlobalMap},
Digest, Error,
};
pub(crate) struct PeerCenterServerGlobalData {
pub global_peer_map: GlobalPeerMap,
pub digest: Digest,
pub update_time: std::time::Instant,
pub peer_update_time: DashMap<PeerId, std::time::Instant>,
#[derive(Debug, Clone, PartialEq, PartialOrd, Ord, Eq, Hash)]
pub(crate) struct SrcDstPeerPair {
src: PeerId,
dst: PeerId,
}
impl PeerCenterServerGlobalData {
fn new() -> Self {
PeerCenterServerGlobalData {
global_peer_map: GlobalPeerMap::new(),
digest: Digest::default(),
update_time: std::time::Instant::now(),
peer_update_time: DashMap::new(),
}
}
#[derive(Debug, Clone)]
pub(crate) struct PeerCenterInfoEntry {
info: DirectConnectedPeerInfo,
update_time: std::time::Instant,
}
#[derive(Default)]
pub(crate) struct PeerCenterServerGlobalData {
pub(crate) global_peer_map: DashMap<SrcDstPeerPair, PeerCenterInfoEntry>,
pub(crate) peer_report_time: DashMap<PeerId, std::time::Instant>,
pub(crate) digest: AtomicCell<Digest>,
}
// a global unique instance for PeerCenterServer
pub(crate) static GLOBAL_DATA: Lazy<DashMap<PeerId, Arc<RwLock<PeerCenterServerGlobalData>>>> =
pub(crate) static GLOBAL_DATA: Lazy<DashMap<PeerId, Arc<PeerCenterServerGlobalData>>> =
Lazy::new(DashMap::new);
pub(crate) fn get_global_data(node_id: PeerId) -> Arc<RwLock<PeerCenterServerGlobalData>> {
pub(crate) fn get_global_data(node_id: PeerId) -> Arc<PeerCenterServerGlobalData> {
GLOBAL_DATA
.entry(node_id)
.or_insert_with(|| Arc::new(RwLock::new(PeerCenterServerGlobalData::new())))
.or_insert_with(|| Arc::new(PeerCenterServerGlobalData::default()))
.value()
.clone()
}
@@ -48,8 +51,6 @@ pub(crate) fn get_global_data(node_id: PeerId) -> Arc<RwLock<PeerCenterServerGlo
pub struct PeerCenterServer {
// every peer has its own server, so use per-struct dash map is ok.
my_node_id: PeerId,
digest_map: DashMap<PeerId, Digest>,
tasks: Arc<JoinSet<()>>,
}
@@ -65,26 +66,32 @@ impl PeerCenterServer {
PeerCenterServer {
my_node_id,
digest_map: DashMap::new(),
tasks: Arc::new(tasks),
}
}
async fn clean_outdated_peer(my_node_id: PeerId) {
let data = get_global_data(my_node_id);
let mut locked_data = data.write().await;
let now = std::time::Instant::now();
let mut to_remove = Vec::new();
for kv in locked_data.peer_update_time.iter() {
if now.duration_since(*kv.value()).as_secs() > 20 {
to_remove.push(*kv.key());
}
}
for peer_id in to_remove {
locked_data.global_peer_map.map.remove(&peer_id);
locked_data.peer_update_time.remove(&peer_id);
}
data.peer_report_time.retain(|_, v| {
std::time::Instant::now().duration_since(*v) < std::time::Duration::from_secs(180)
});
data.global_peer_map.retain(|_, v| {
std::time::Instant::now().duration_since(v.update_time)
< std::time::Duration::from_secs(180)
});
}
/// Computes a digest of the current global peer map by hashing all
/// src/dst pairs in ascending order, so the result is stable regardless
/// of map iteration order.
fn calc_global_digest(my_node_id: PeerId) -> Digest {
    let data = get_global_data(my_node_id);
    let mut pairs: Vec<_> = data
        .global_peer_map
        .iter()
        .map(|item| item.key().clone())
        .collect();
    pairs.sort();
    let mut hasher = std::collections::hash_map::DefaultHasher::new();
    for pair in pairs {
        pair.hash(&mut hasher);
    }
    hasher.finish()
}
}
@@ -95,39 +102,28 @@ impl PeerCenterService for PeerCenterServer {
self,
_: tarpc::context::Context,
my_peer_id: PeerId,
peers: Option<PeerInfoForGlobalMap>,
digest: Digest,
peers: PeerInfoForGlobalMap,
) -> Result<(), Error> {
tracing::trace!("receive report_peers");
tracing::debug!("receive report_peers");
let data = get_global_data(self.my_node_id);
let mut locked_data = data.write().await;
locked_data
.peer_update_time
data.peer_report_time
.insert(my_peer_id, std::time::Instant::now());
let old_digest = self.digest_map.get(&my_peer_id);
// if digest match, no need to update
if let Some(old_digest) = old_digest {
if *old_digest == digest {
return Ok(());
}
for (peer_id, peer_info) in peers.direct_peers {
let pair = SrcDstPeerPair {
src: my_peer_id,
dst: peer_id,
};
let entry = PeerCenterInfoEntry {
info: peer_info,
update_time: std::time::Instant::now(),
};
data.global_peer_map.insert(pair, entry);
}
if peers.is_none() {
return Err(Error::DigestMismatch);
}
self.digest_map.insert(my_peer_id, digest);
locked_data
.global_peer_map
.map
.insert(my_peer_id, peers.unwrap());
let mut hasher = std::collections::hash_map::DefaultHasher::new();
locked_data.global_peer_map.map.hash(&mut hasher);
locked_data.digest = hasher.finish() as Digest;
locked_data.update_time = std::time::Instant::now();
data.digest
.store(PeerCenterServer::calc_global_digest(self.my_node_id));
Ok(())
}
@@ -138,15 +134,26 @@ impl PeerCenterService for PeerCenterServer {
digest: Digest,
) -> Result<Option<GetGlobalPeerMapResponse>, Error> {
let data = get_global_data(self.my_node_id);
if digest == data.read().await.digest {
if digest == data.digest.load() && digest != 0 {
return Ok(None);
}
let data = get_global_data(self.my_node_id);
let locked_data = data.read().await;
let mut global_peer_map = GlobalPeerMap::new();
for item in data.global_peer_map.iter() {
let (pair, entry) = item.pair();
global_peer_map
.map
.entry(pair.src)
.or_insert_with(|| PeerInfoForGlobalMap {
direct_peers: Default::default(),
})
.direct_peers
.insert(pair.dst, entry.info.clone());
}
Ok(Some(GetGlobalPeerMapResponse {
global_peer_map: locked_data.global_peer_map.clone(),
digest: locked_data.digest,
global_peer_map,
digest: data.digest.load(),
}))
}
}

View File

@@ -5,39 +5,23 @@ use crate::{common::PeerId, rpc::DirectConnectedPeerInfo};
use super::{Digest, Error};
use crate::rpc::PeerInfo;
pub type LatencyLevel = crate::rpc::cli::LatencyLevel;
impl LatencyLevel {
pub const fn from_latency_ms(lat_ms: u32) -> Self {
if lat_ms < 10 {
LatencyLevel::VeryLow
} else if lat_ms < 50 {
LatencyLevel::Low
} else if lat_ms < 100 {
LatencyLevel::Normal
} else if lat_ms < 200 {
LatencyLevel::High
} else {
LatencyLevel::VeryHigh
}
}
}
pub type PeerInfoForGlobalMap = crate::rpc::cli::PeerInfoForGlobalMap;
impl From<Vec<PeerInfo>> for PeerInfoForGlobalMap {
fn from(peers: Vec<PeerInfo>) -> Self {
let mut peer_map = BTreeMap::new();
for peer in peers {
let min_lat = peer
let Some(min_lat) = peer
.conns
.iter()
.map(|conn| conn.stats.as_ref().unwrap().latency_us)
.min()
.unwrap_or(0);
else {
continue;
};
let dp_info = DirectConnectedPeerInfo {
latency_level: LatencyLevel::from_latency_ms(min_lat as u32 / 1000) as i32,
latency_ms: std::cmp::max(1, (min_lat as u32 / 1000) as i32),
};
// sort conn info so hash result is stable
@@ -73,11 +57,7 @@ pub struct GetGlobalPeerMapResponse {
pub trait PeerCenterService {
// report center server which peer is directly connected to me
// digest is a hash of current peer map, if digest not match, we need to transfer the whole map
async fn report_peers(
my_peer_id: PeerId,
peers: Option<PeerInfoForGlobalMap>,
digest: Digest,
) -> Result<(), Error>;
async fn report_peers(my_peer_id: PeerId, peers: PeerInfoForGlobalMap) -> Result<(), Error>;
async fn get_global_peer_map(digest: Digest)
-> Result<Option<GetGlobalPeerMapResponse>, Error>;

View File

@@ -0,0 +1,146 @@
use aes_gcm::aead::consts::{U12, U16};
use aes_gcm::aead::generic_array::GenericArray;
use aes_gcm::{AeadCore, AeadInPlace, Aes128Gcm, Aes256Gcm, Key, KeyInit, Nonce, Tag};
use rand::rngs::OsRng;
use zerocopy::{AsBytes, FromBytes};
use crate::tunnel::packet_def::{AesGcmTail, ZCPacket, AES_GCM_ENCRYPTION_RESERVED};
use super::{Encryptor, Error};
/// AES-GCM packet encryptor/decryptor; wraps either the 128- or 256-bit
/// variant selected at construction time.
#[derive(Clone)]
pub struct AesGcmCipher {
    pub(crate) cipher: AesGcmEnum,
}
/// Concrete AES-GCM cipher in use, chosen by key size (16 or 32 bytes).
#[derive(Clone)]
pub enum AesGcmEnum {
    AES128GCM(Aes128Gcm),
    AES256GCM(Aes256Gcm),
}
impl AesGcmCipher {
pub fn new_128(key: [u8; 16]) -> Self {
let key: &Key<Aes128Gcm> = &key.into();
Self {
cipher: AesGcmEnum::AES128GCM(Aes128Gcm::new(key)),
}
}
pub fn new_256(key: [u8; 32]) -> Self {
let key: &Key<Aes256Gcm> = &key.into();
Self {
cipher: AesGcmEnum::AES256GCM(Aes256Gcm::new(key)),
}
}
}
impl Encryptor for AesGcmCipher {
fn decrypt(&self, zc_packet: &mut ZCPacket) -> Result<(), Error> {
let pm_header = zc_packet.peer_manager_header().unwrap();
if !pm_header.is_encrypted() {
return Ok(());
}
let payload_len = zc_packet.payload().len();
if payload_len < AES_GCM_ENCRYPTION_RESERVED {
return Err(Error::PacketTooShort(zc_packet.payload().len()));
}
let text_len = payload_len - AES_GCM_ENCRYPTION_RESERVED;
let aes_tail = AesGcmTail::ref_from_suffix(zc_packet.payload())
.unwrap()
.clone();
let nonce: &GenericArray<u8, U12> = Nonce::from_slice(&aes_tail.nonce);
let tag: GenericArray<u8, U16> = Tag::clone_from_slice(aes_tail.tag.as_slice());
let rs = match &self.cipher {
AesGcmEnum::AES128GCM(aes_gcm) => aes_gcm.decrypt_in_place_detached(
nonce,
&[],
&mut zc_packet.mut_payload()[..text_len],
&tag,
),
AesGcmEnum::AES256GCM(aes_gcm) => aes_gcm.decrypt_in_place_detached(
nonce,
&[],
&mut zc_packet.mut_payload()[..text_len],
&tag,
),
};
if let Err(e) = rs {
println!("error: {:?}", e.to_string());
return Err(Error::DecryptionFailed);
}
let pm_header = zc_packet.mut_peer_manager_header().unwrap();
pm_header.set_encrypted(false);
let old_len = zc_packet.buf_len();
zc_packet
.mut_inner()
.truncate(old_len - AES_GCM_ENCRYPTION_RESERVED);
return Ok(());
}
fn encrypt(&self, zc_packet: &mut ZCPacket) -> Result<(), Error> {
let pm_header = zc_packet.peer_manager_header().unwrap();
if pm_header.is_encrypted() {
tracing::warn!(?zc_packet, "packet is already encrypted");
return Ok(());
}
let mut tail = AesGcmTail::default();
let rs = match &self.cipher {
AesGcmEnum::AES128GCM(aes_gcm) => {
let nonce = Aes128Gcm::generate_nonce(&mut OsRng);
tail.nonce.copy_from_slice(nonce.as_slice());
aes_gcm.encrypt_in_place_detached(&nonce, &[], zc_packet.mut_payload())
}
AesGcmEnum::AES256GCM(aes_gcm) => {
let nonce = Aes256Gcm::generate_nonce(&mut OsRng);
tail.nonce.copy_from_slice(nonce.as_slice());
aes_gcm.encrypt_in_place_detached(&nonce, &[], zc_packet.mut_payload())
}
};
return match rs {
Ok(tag) => {
tail.tag.copy_from_slice(tag.as_slice());
let pm_header = zc_packet.mut_peer_manager_header().unwrap();
pm_header.set_encrypted(true);
zc_packet.mut_inner().extend_from_slice(tail.as_bytes());
Ok(())
}
Err(_) => Err(Error::EncryptionFailed),
};
}
}
#[cfg(test)]
mod tests {
use crate::{
peers::encrypt::{aes_gcm::AesGcmCipher, Encryptor},
tunnel::packet_def::{ZCPacket, AES_GCM_ENCRYPTION_RESERVED},
};
// Round-trip test: encrypt then decrypt must restore the original payload,
// and the encrypted-flag in the peer manager header must track each step.
#[test]
fn test_aes_gcm_cipher() {
let key = [0u8; 16];
let cipher = AesGcmCipher::new_128(key);
let text = b"1234567";
let mut packet = ZCPacket::new_with_payload(text);
packet.fill_peer_manager_hdr(0, 0, 0);
cipher.encrypt(&mut packet).unwrap();
// Encryption appends exactly the nonce+tag tail to the payload.
assert_eq!(
packet.payload().len(),
text.len() + AES_GCM_ENCRYPTION_RESERVED
);
assert_eq!(packet.peer_manager_header().unwrap().is_encrypted(), true);
cipher.decrypt(&mut packet).unwrap();
// Decryption strips the tail and restores the plaintext.
assert_eq!(packet.payload(), text);
assert_eq!(packet.peer_manager_header().unwrap().is_encrypted(), false);
}
}

View File

@@ -1,11 +1,13 @@
use crate::tunnel::packet_def::ZCPacket;
#[cfg(feature = "wireguard")]
pub mod ring_aes_gcm;
#[cfg(feature = "aes-gcm")]
pub mod aes_gcm;
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("packet is not encrypted")]
NotEcrypted,
#[error("packet is too short. len: {0}")]
PacketTooShort(usize),
#[error("decryption failed")]
@@ -28,7 +30,12 @@ impl Encryptor for NullCipher {
Ok(())
}
fn decrypt(&self, _zc_packet: &mut ZCPacket) -> Result<(), Error> {
Ok(())
fn decrypt(&self, zc_packet: &mut ZCPacket) -> Result<(), Error> {
let pm_header = zc_packet.peer_manager_header().unwrap();
if pm_header.is_encrypted() {
return Err(Error::DecryptionFailed);
} else {
Ok(())
}
}
}

View File

@@ -54,7 +54,7 @@ impl Encryptor for AesGcmCipher {
fn decrypt(&self, zc_packet: &mut ZCPacket) -> Result<(), Error> {
let pm_header = zc_packet.peer_manager_header().unwrap();
if !pm_header.is_encrypted() {
return Err(Error::NotEcrypted);
return Ok(());
}
let payload_len = zc_packet.payload().len();

View File

@@ -29,6 +29,7 @@ use super::{
peer_conn::PeerConn,
peer_map::PeerMap,
peer_rpc::{PeerRpcManager, PeerRpcManagerTransport},
route_trait::NextHopPolicy,
PacketRecvChan, PacketRecvChanReceiver,
};
@@ -66,7 +67,10 @@ impl ForeignNetworkManagerData {
.get(&network_name)
.ok_or_else(|| Error::RouteError(Some("no peer in network".to_string())))?
.clone();
entry.peer_map.send_msg(msg, dst_peer_id).await
entry
.peer_map
.send_msg(msg, dst_peer_id, NextHopPolicy::LeastHop)
.await
}
fn get_peer_network(&self, peer_id: PeerId) -> Option<String> {
@@ -275,7 +279,10 @@ impl ForeignNetworkManager {
}
if let Some(entry) = data.get_network_entry(&from_network) {
let ret = entry.peer_map.send_msg(packet_bytes, to_peer_id).await;
let ret = entry
.peer_map
.send_msg(packet_bytes, to_peer_id, NextHopPolicy::LeastHop)
.await;
if ret.is_err() {
tracing::error!("forward packet to peer failed: {:?}", ret.err());
}

View File

@@ -1,4 +1,3 @@
pub mod packet;
pub mod peer;
// pub mod peer_conn;
pub mod peer_conn;

View File

@@ -1,254 +0,0 @@
use std::fmt::Debug;
use rkyv::{Archive, Deserialize, Serialize};
use tokio_util::bytes::Bytes;
use crate::common::{
global_ctx::NetworkIdentity,
rkyv_util::{decode_from_bytes, encode_to_bytes, vec_to_string},
PeerId,
};
const MAGIC: u32 = 0xd1e1a5e1;
const VERSION: u32 = 1;
/// 16-byte UUID wrapper that is rkyv-archivable (wraps `uuid::Bytes` so the
/// raw bytes can be serialized with rkyv directly).
#[derive(Archive, Deserialize, Serialize, PartialEq, Clone)]
#[archive(compare(PartialEq), check_bytes)]
// Derives can be passed through to the generated type:
#[archive_attr(derive(Debug))]
pub struct UUID(uuid::Bytes);
// impl Debug for UUID
// Debug prints the canonical hyphenated UUID form instead of raw bytes.
impl std::fmt::Debug for UUID {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let uuid = uuid::Uuid::from_bytes(self.0);
write!(f, "{}", uuid)
}
}
// Lossless conversions between this wrapper and `uuid::Uuid`.
impl From<uuid::Uuid> for UUID {
fn from(uuid: uuid::Uuid) -> Self {
UUID(*uuid.as_bytes())
}
}
impl From<UUID> for uuid::Uuid {
fn from(uuid: UUID) -> Self {
uuid::Uuid::from_bytes(uuid.0)
}
}
// `ArchivedUUID` is generated by rkyv's `Archive` derive on `UUID`.
impl ArchivedUUID {
pub fn to_uuid(&self) -> uuid::Uuid {
uuid::Uuid::from_bytes(self.0)
}
}
impl From<&ArchivedUUID> for UUID {
fn from(uuid: &ArchivedUUID) -> Self {
UUID(uuid.0)
}
}
/// First control message exchanged on a new peer connection. Carries the
/// protocol magic/version and the sender's network identity so peers can
/// reject incompatible or foreign connections.
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct HandShake {
pub magic: u32, // expected to equal the module-level MAGIC constant
pub my_peer_id: PeerId,
pub version: u32, // expected to equal the module-level VERSION constant
pub features: Vec<String>,
pub network_identity: NetworkIdentity,
}
/// Envelope for routing-protocol messages; `route_id` selects which route
/// implementation the opaque `body` belongs to.
#[derive(serde::Serialize, serde::Deserialize, Debug)]
pub struct RoutePacket {
pub route_id: u8,
pub body: Vec<u8>,
}
/// Payload of every non-data (control) packet, serialized with postcard
/// inside `Packet::payload`.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub enum CtrlPacketPayload {
HandShake(HandShake),
RoutePacket(RoutePacket),
Ping(u32),
Pong(u32),
TaRpc(u32, u32, bool, Vec<u8>), // u32: service_id, u32: transact_id, bool: is_req, Vec<u8>: rpc body
}
impl CtrlPacketPayload {
/// Deserialize the control payload of an archived (zero-copy) packet.
/// Only valid for control packets, hence the assert on the packet type.
/// NOTE(review): `unwrap` panics on a malformed payload — confirm peers
/// are trusted at this point or harden to return a Result.
pub fn from_packet(p: &ArchivedPacket) -> CtrlPacketPayload {
assert_ne!(p.packet_type, PacketType::Data);
postcard::from_bytes(p.payload.as_bytes()).unwrap()
}
/// Same as `from_packet` but for an owned (non-archived) `Packet`.
pub fn from_packet2(p: &Packet) -> CtrlPacketPayload {
postcard::from_bytes(p.payload.as_bytes()).unwrap()
}
}
/// Discriminates data packets from the various control packets; stored as a
/// single byte on the wire (`repr(u8)`).
#[repr(u8)]
#[derive(Archive, Deserialize, Serialize, Debug)]
#[archive(compare(PartialEq), check_bytes)]
// Derives can be passed through to the generated type:
#[archive_attr(derive(Debug))]
pub enum PacketType {
Data = 1,
HandShake = 2,
RoutePacket = 3,
Ping = 4,
Pong = 5,
TaRpc = 6,
}
/// The on-wire packet: routing header (from/to peer, type) plus an opaque
/// payload. The payload is stored as `String` because rkyv serializes it
/// efficiently; it holds raw bytes, not necessarily valid UTF-8 text
/// (see `vec_to_string` in the constructors).
#[derive(Archive, Deserialize, Serialize)]
#[archive(compare(PartialEq), check_bytes)]
// Derives can be passed through to the generated type:
pub struct Packet {
pub from_peer: PeerId,
pub to_peer: PeerId,
pub packet_type: PacketType,
pub payload: String,
}
// Manual Debug: print the payload as raw bytes rather than as a string,
// since it is generally binary data.
impl std::fmt::Debug for Packet {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Packet {{ from_peer: {}, to_peer: {}, packet_type: {:?}, payload: {:?} }}",
self.from_peer,
self.to_peer,
self.packet_type,
&self.payload.as_bytes()
)
}
}
// Same formatting for the rkyv-archived form of the packet.
impl std::fmt::Debug for ArchivedPacket {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Packet {{ from_peer: {}, to_peer: {}, packet_type: {:?}, payload: {:?} }}",
self.from_peer,
self.to_peer,
self.packet_type,
&self.payload.as_bytes()
)
}
}
impl Packet {
/// Zero-copy decode: reinterpret the byte buffer as an archived packet.
/// NOTE(review): `unwrap` panics on bytes that fail rkyv validation —
/// confirm inputs are from trusted peers or propagate the error.
pub fn decode(v: &[u8]) -> &ArchivedPacket {
decode_from_bytes::<Packet>(v).unwrap()
}
/// Generic constructor; the type-specific helpers below are preferred.
pub fn new(
from_peer: PeerId,
to_peer: PeerId,
packet_type: PacketType,
payload: Vec<u8>,
) -> Self {
Packet {
from_peer,
to_peer,
packet_type,
// payload bytes are smuggled through a String for rkyv efficiency
payload: vec_to_string(payload),
}
}
}
// Serialize the packet into a wire buffer (4096 is the scratch-space hint
// for the rkyv serializer, not a hard size limit).
impl From<Packet> for Bytes {
fn from(val: Packet) -> Self {
encode_to_bytes::<_, 4096>(&val)
}
}
impl Packet {
/// Handshake packet sent when a connection is established. `to_peer` is 0
/// because the remote peer id is not known yet at handshake time.
pub fn new_handshake(from_peer: PeerId, network: &NetworkIdentity) -> Self {
let handshake = CtrlPacketPayload::HandShake(HandShake {
magic: MAGIC,
my_peer_id: from_peer,
version: VERSION,
features: Vec::new(),
network_identity: network.clone().into(),
});
Packet::new(
from_peer.into(),
0,
PacketType::HandShake,
postcard::to_allocvec(&handshake).unwrap(),
)
}
/// Plain data packet: the payload is carried verbatim, no control envelope.
pub fn new_data_packet(from_peer: PeerId, to_peer: PeerId, data: &[u8]) -> Self {
Packet::new(from_peer, to_peer, PacketType::Data, data.to_vec())
}
/// Routing-protocol packet; `route_id` selects the route implementation.
pub fn new_route_packet(from_peer: PeerId, to_peer: PeerId, route_id: u8, data: &[u8]) -> Self {
let route = CtrlPacketPayload::RoutePacket(RoutePacket {
route_id,
body: data.to_vec(),
});
Packet::new(
from_peer,
to_peer,
PacketType::RoutePacket,
postcard::to_allocvec(&route).unwrap(),
)
}
/// Keep-alive ping; `seq` is echoed back in the matching pong.
pub fn new_ping_packet(from_peer: PeerId, to_peer: PeerId, seq: u32) -> Self {
let ping = CtrlPacketPayload::Ping(seq);
Packet::new(
from_peer,
to_peer,
PacketType::Ping,
postcard::to_allocvec(&ping).unwrap(),
)
}
/// Reply to a ping carrying the same `seq` for latency measurement.
pub fn new_pong_packet(from_peer: PeerId, to_peer: PeerId, seq: u32) -> Self {
let pong = CtrlPacketPayload::Pong(seq);
Packet::new(
from_peer,
to_peer,
PacketType::Pong,
postcard::to_allocvec(&pong).unwrap(),
)
}
/// Tarpc RPC envelope; `is_req` distinguishes request from response, and
/// `transact_id` pairs them up.
pub fn new_tarpc_packet(
from_peer: PeerId,
to_peer: PeerId,
service_id: u32,
transact_id: u32,
is_req: bool,
body: Vec<u8>,
) -> Self {
let ta_rpc = CtrlPacketPayload::TaRpc(service_id, transact_id, is_req, body);
Packet::new(
from_peer,
to_peer,
PacketType::TaRpc,
postcard::to_allocvec(&ta_rpc).unwrap(),
)
}
}
#[cfg(test)]
mod tests {
use crate::common::new_peer_id;
use super::*;
// Smoke test: a data packet survives an encode -> zero-copy-decode round
// trip without panicking (output is only printed, not asserted).
#[tokio::test]
async fn serialize() {
let a = "abcde";
let out = Packet::new_data_packet(new_peer_id(), new_peer_id(), a.as_bytes());
// let out = T::new(a.as_bytes());
let out_bytes: Bytes = out.into();
println!("out str: {:?}", a.as_bytes());
println!("out bytes: {:?}", out_bytes);
let archived = Packet::decode(&out_bytes[..]);
println!("in packet: {:?}", archived);
}
}

View File

@@ -29,8 +29,8 @@ use crate::{
global_ctx::ArcGlobalCtx,
PeerId,
},
peers::packet::PacketType,
rpc::{HandshakeRequest, PeerConnInfo, PeerConnStats, TunnelInfo},
tunnel::packet_def::PacketType,
tunnel::{
filter::{StatsRecorderTunnelFilter, TunnelFilter, TunnelWithFilter},
mpsc::{MpscTunnel, MpscTunnelSender},

View File

@@ -76,7 +76,7 @@ impl PeerConnPinger {
let now = std::time::Instant::now();
// wait until we get a pong packet in ctrl_resp_receiver
let resp = timeout(Duration::from_secs(1), async {
let resp = timeout(Duration::from_secs(2), async {
loop {
match receiver.recv().await {
Ok(p) => {

View File

@@ -22,17 +22,20 @@ use tokio_util::bytes::Bytes;
use crate::{
common::{error::Error, global_ctx::ArcGlobalCtx, PeerId},
peers::{
packet, peer_conn::PeerConn, peer_rpc::PeerRpcManagerTransport,
route_trait::RouteInterface, PeerPacketFilter,
peer_conn::PeerConn,
peer_rpc::PeerRpcManagerTransport,
route_trait::{NextHopPolicy, RouteInterface},
PeerPacketFilter,
},
tunnel::{
self,
packet_def::{PacketType, ZCPacket},
SinkItem, Tunnel, TunnelConnector,
},
};
use super::{
encrypt::{ring_aes_gcm::AesGcmCipher, Encryptor, NullCipher},
encrypt::{Encryptor, NullCipher},
foreign_network_client::ForeignNetworkClient,
foreign_network_manager::ForeignNetworkManager,
peer_conn::PeerConnId,
@@ -72,7 +75,10 @@ impl PeerRpcManagerTransport for RpcTransport {
.ok_or(Error::Unknown)?;
let peers = self.peers.upgrade().ok_or(Error::Unknown)?;
if let Some(gateway_id) = peers.get_gateway_peer_id(dst_peer_id).await {
if let Some(gateway_id) = peers
.get_gateway_peer_id(dst_peer_id, NextHopPolicy::LeastHop)
.await
{
tracing::trace!(
?dst_peer_id,
?gateway_id,
@@ -149,6 +155,8 @@ pub struct PeerManager {
foreign_network_client: Arc<ForeignNetworkClient>,
encryptor: Arc<Box<dyn Encryptor>>,
exit_nodes: Vec<Ipv4Addr>,
}
impl Debug for PeerManager {
@@ -176,12 +184,25 @@ impl PeerManager {
my_peer_id,
));
let encryptor: Arc<Box<dyn Encryptor>> =
Arc::new(if global_ctx.get_flags().enable_encryption {
Box::new(AesGcmCipher::new_128(global_ctx.get_128_key()))
} else {
Box::new(NullCipher)
});
let mut encryptor: Arc<Box<dyn Encryptor>> = Arc::new(Box::new(NullCipher));
if global_ctx.get_flags().enable_encryption {
#[cfg(feature = "wireguard")]
{
use super::encrypt::ring_aes_gcm::AesGcmCipher;
encryptor = Arc::new(Box::new(AesGcmCipher::new_128(global_ctx.get_128_key())));
}
#[cfg(all(feature = "aes-gcm", not(feature = "wireguard")))]
{
use super::encrypt::aes_gcm::AesGcmCipher;
encryptor = Arc::new(Box::new(AesGcmCipher::new_128(global_ctx.get_128_key())));
}
#[cfg(all(not(feature = "wireguard"), not(feature = "aes-gcm")))]
{
compile_error!("wireguard or aes-gcm feature must be enabled for encryption");
}
}
// TODO: remove these because we have impl pipeline processor.
let (peer_rpc_tspt_sender, peer_rpc_tspt_recv) = mpsc::unbounded_channel();
@@ -219,6 +240,8 @@ impl PeerManager {
my_peer_id,
));
let exit_nodes = global_ctx.config.get_exit_nodes();
PeerManager {
my_peer_id,
@@ -243,6 +266,7 @@ impl PeerManager {
foreign_network_client,
encryptor,
exit_nodes,
}
}
@@ -306,36 +330,47 @@ impl PeerManager {
let my_peer_id = self.my_peer_id;
let peers = self.peers.clone();
let pipe_line = self.peer_packet_process_pipeline.clone();
let foreign_client = self.foreign_network_client.clone();
let encryptor = self.encryptor.clone();
self.tasks.lock().await.spawn(async move {
log::trace!("start_peer_recv");
while let Some(mut ret) = recv.next().await {
let Some(hdr) = ret.peer_manager_header() else {
let Some(hdr) = ret.mut_peer_manager_header() else {
tracing::warn!(?ret, "invalid packet, skip");
continue;
};
tracing::trace!(?hdr, ?ret, "peer recv a packet...");
tracing::trace!(?hdr, "peer recv a packet...");
let from_peer_id = hdr.from_peer_id.get();
let to_peer_id = hdr.to_peer_id.get();
if to_peer_id != my_peer_id {
if hdr.forward_counter > 7 {
tracing::warn!(?hdr, "forward counter exceed, drop packet");
continue;
}
if hdr.forward_counter > 2 && hdr.is_latency_first() {
tracing::trace!(?hdr, "set_latency_first false because too many hop");
hdr.set_latency_first(false);
}
hdr.forward_counter += 1;
tracing::trace!(?to_peer_id, ?my_peer_id, "need forward");
let ret = peers.send_msg(ret, to_peer_id).await;
let ret =
Self::send_msg_internal(&peers, &foreign_client, ret, to_peer_id).await;
if ret.is_err() {
tracing::error!(?ret, ?to_peer_id, ?from_peer_id, "forward packet error");
}
} else {
if let Err(e) = encryptor
.decrypt(&mut ret)
.with_context(|| "decrypt failed")
{
if let Err(e) = encryptor.decrypt(&mut ret) {
tracing::error!(?e, "decrypt failed");
continue;
}
let mut processed = false;
let mut zc_packet = Some(ret);
let mut idx = 0;
for pipeline in pipe_line.read().await.iter().rev() {
tracing::debug!(?zc_packet, ?idx, "try_process_packet_from_peer");
tracing::trace!(?zc_packet, ?idx, "try_process_packet_from_peer");
idx += 1;
zc_packet = pipeline
.try_process_packet_from_peer(zc_packet.unwrap())
@@ -506,8 +541,34 @@ impl PeerManager {
}
}
fn get_next_hop_policy(is_first_latency: bool) -> NextHopPolicy {
if is_first_latency {
NextHopPolicy::LeastCost
} else {
NextHopPolicy::LeastHop
}
}
pub async fn send_msg(&self, msg: ZCPacket, dst_peer_id: PeerId) -> Result<(), Error> {
self.peers.send_msg(msg, dst_peer_id).await
Self::send_msg_internal(&self.peers, &self.foreign_network_client, msg, dst_peer_id).await
}
async fn send_msg_internal(
peers: &Arc<PeerMap>,
foreign_network_client: &Arc<ForeignNetworkClient>,
msg: ZCPacket,
dst_peer_id: PeerId,
) -> Result<(), Error> {
let policy =
Self::get_next_hop_policy(msg.peer_manager_header().unwrap().is_latency_first());
if let Some(gateway) = peers.get_gateway_peer_id(dst_peer_id, policy).await {
peers.send_msg_directly(msg, gateway).await
} else if foreign_network_client.has_next_hop(dst_peer_id) {
foreign_network_client.send_msg(msg, dst_peer_id).await
} else {
Err(Error::RouteError(None))
}
}
pub async fn send_msg_ipv4(&self, mut msg: ZCPacket, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
@@ -517,6 +578,7 @@ impl PeerManager {
ipv4_addr
);
let mut is_exit_node = false;
let mut dst_peers = vec![];
// NOTE: currently we only support ipv4 and cidr is 24
if ipv4_addr.is_broadcast() || ipv4_addr.is_multicast() || ipv4_addr.octets()[3] == 255 {
@@ -529,6 +591,14 @@ impl PeerManager {
);
} else if let Some(peer_id) = self.peers.get_peer_id_by_ipv4(&ipv4_addr).await {
dst_peers.push(peer_id);
} else {
for exit_node in &self.exit_nodes {
if let Some(peer_id) = self.peers.get_peer_id_by_ipv4(exit_node).await {
dst_peers.push(peer_id);
is_exit_node = true;
break;
}
}
}
if dst_peers.is_empty() {
@@ -536,12 +606,23 @@ impl PeerManager {
return Ok(());
}
msg.fill_peer_manager_hdr(self.my_peer_id, 0, packet::PacketType::Data as u8);
msg.fill_peer_manager_hdr(
self.my_peer_id,
0,
tunnel::packet_def::PacketType::Data as u8,
);
self.run_nic_packet_process_pipeline(&mut msg).await;
self.encryptor
.encrypt(&mut msg)
.with_context(|| "encrypt failed")?;
let is_latency_first = self.global_ctx.get_flags().latency_first;
msg.mut_peer_manager_header()
.unwrap()
.set_latency_first(is_latency_first)
.set_exit_node(is_exit_node);
let next_hop_policy = Self::get_next_hop_policy(is_latency_first);
let mut errs: Vec<Error> = vec![];
let mut msg = Some(msg);
@@ -559,7 +640,11 @@ impl PeerManager {
.to_peer_id
.set(*peer_id);
if let Some(gateway) = self.peers.get_gateway_peer_id(*peer_id).await {
if let Some(gateway) = self
.peers
.get_gateway_peer_id(*peer_id, next_hop_policy.clone())
.await
{
if let Err(e) = self.peers.send_msg_directly(msg, gateway).await {
errs.push(e);
}
@@ -662,18 +747,21 @@ impl PeerManager {
#[cfg(test)]
mod tests {
use std::{fmt::Debug, sync::Arc};
use std::{fmt::Debug, sync::Arc, time::Duration};
use crate::{
common::{config::Flags, global_ctx::tests::get_mock_global_ctx},
connector::{
create_connector_by_url, udp_hole_punch::tests::create_mock_peer_manager_with_mock_stun,
},
instance::listeners::get_listener_by_url,
peers::{
peer_manager::RouteAlgoType,
peer_rpc::tests::{MockService, TestRpcService, TestRpcServiceClient},
tests::{connect_peer_manager, wait_for_condition, wait_route_appear},
tests::{connect_peer_manager, wait_route_appear},
},
rpc::NatType,
tunnel::common::tests::wait_for_condition,
tunnel::{TunnelConnector, TunnelListener},
};
@@ -804,4 +892,36 @@ mod tests {
.unwrap();
assert_eq!(ret, "hello c abc");
}
// Regression test: an encryption-enabled peer and a plaintext peer must be
// able to coexist without crashing either side, while peers with matching
// encryption settings still establish routes normally.
#[tokio::test]
async fn communicate_between_enc_and_non_enc() {
// Helper building a running PeerManager with the given encryption flag.
let create_mgr = |enable_encryption| async move {
let (s, _r) = tokio::sync::mpsc::channel(1000);
let mock_global_ctx = get_mock_global_ctx();
mock_global_ctx.config.set_flags(Flags {
enable_encryption,
..Default::default()
});
let peer_mgr = Arc::new(PeerManager::new(RouteAlgoType::Ospf, mock_global_ctx, s));
peer_mgr.run().await.unwrap();
peer_mgr
};
// Mismatched encryption settings: connection should not bring anyone down.
let peer_mgr_a = create_mgr(true).await;
let peer_mgr_b = create_mgr(false).await;
connect_peer_manager(peer_mgr_a.clone(), peer_mgr_b.clone()).await;
// wait 5sec should not crash.
tokio::time::sleep(Duration::from_secs(5)).await;
// both mgr should alive
// Matching settings (enc<->enc and plain<->plain) must still form routes.
let mgr_c = create_mgr(true).await;
connect_peer_manager(peer_mgr_a.clone(), mgr_c.clone()).await;
wait_route_appear(mgr_c, peer_mgr_a).await.unwrap();
let mgr_d = create_mgr(false).await;
connect_peer_manager(peer_mgr_b.clone(), mgr_d.clone()).await;
wait_route_appear(mgr_d, peer_mgr_b).await.unwrap();
}
}

View File

@@ -18,7 +18,7 @@ use crate::{
use super::{
peer::Peer,
peer_conn::{PeerConn, PeerConnId},
route_trait::ArcRoute,
route_trait::{ArcRoute, NextHopPolicy},
PacketRecvChan,
};
@@ -94,18 +94,25 @@ impl PeerMap {
Ok(())
}
pub async fn get_gateway_peer_id(&self, dst_peer_id: PeerId) -> Option<PeerId> {
pub async fn get_gateway_peer_id(
&self,
dst_peer_id: PeerId,
policy: NextHopPolicy,
) -> Option<PeerId> {
if dst_peer_id == self.my_peer_id {
return Some(dst_peer_id);
}
if self.has_peer(dst_peer_id) {
if self.has_peer(dst_peer_id) && matches!(policy, NextHopPolicy::LeastHop) {
return Some(dst_peer_id);
}
// get route info
for route in self.routes.read().await.iter() {
if let Some(gateway_peer_id) = route.get_next_hop(dst_peer_id).await {
if let Some(gateway_peer_id) = route
.get_next_hop_with_policy(dst_peer_id, policy.clone())
.await
{
// for foreign network, gateway_peer_id may not connect to me
if self.has_peer(gateway_peer_id) {
return Some(gateway_peer_id);
@@ -116,8 +123,13 @@ impl PeerMap {
None
}
pub async fn send_msg(&self, msg: ZCPacket, dst_peer_id: PeerId) -> Result<(), Error> {
let Some(gateway_peer_id) = self.get_gateway_peer_id(dst_peer_id).await else {
pub async fn send_msg(
&self,
msg: ZCPacket,
dst_peer_id: PeerId,
policy: NextHopPolicy,
) -> Result<(), Error> {
let Some(gateway_peer_id) = self.get_gateway_peer_id(dst_peer_id, policy).await else {
return Err(Error::RouteError(Some(format!(
"peer map sengmsg no gateway for dst_peer_id: {}",
dst_peer_id

View File

@@ -3,13 +3,18 @@ use std::{
fmt::Debug,
net::Ipv4Addr,
sync::{
atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering},
atomic::{AtomicBool, AtomicU32, Ordering},
Arc, Weak,
},
time::{Duration, SystemTime},
};
use dashmap::DashMap;
use petgraph::{
algo::{all_simple_paths, astar, dijkstra},
graph::NodeIndex,
Directed, Graph,
};
use serde::{Deserialize, Serialize};
use tokio::{select, sync::Mutex, task::JoinSet};
@@ -19,7 +24,14 @@ use crate::{
rpc::{NatType, StunInfo},
};
use super::{peer_rpc::PeerRpcManager, PeerPacketFilter};
use super::{
peer_rpc::PeerRpcManager,
route_trait::{
DefaultRouteCostCalculator, NextHopPolicy, RouteCostCalculator,
RouteCostCalculatorInterface,
},
PeerPacketFilter,
};
static SERVICE_ID: u32 = 7;
static UPDATE_PEER_INFO_PERIOD: Duration = Duration::from_secs(3600);
@@ -101,7 +113,7 @@ impl RoutePeerInfo {
.map(|x| x.to_string())
.chain(global_ctx.get_vpn_portal_cidr().map(|x| x.to_string()))
.collect(),
hostname: global_ctx.get_hostname(),
hostname: Some(global_ctx.get_hostname()),
udp_stun_info: global_ctx
.get_stun_info_collector()
.get_stun_info()
@@ -138,11 +150,7 @@ impl Into<crate::rpc::Route> for RoutePeerInfo {
next_hop_peer_id: 0,
cost: self.cost as i32,
proxy_cidrs: self.proxy_cidrs.clone(),
hostname: if let Some(hostname) = &self.hostname {
hostname.clone()
} else {
"".to_string()
},
hostname: self.hostname.unwrap_or_default(),
stun_info: {
let mut stun_info = StunInfo::default();
if let Ok(udp_nat_type) = NatType::try_from(self.udp_stun_info as i32) {
@@ -364,11 +372,15 @@ impl SyncedRouteInfo {
}
}
type PeerGraph = Graph<PeerId, i32, Directed>;
type PeerIdToNodexIdxMap = DashMap<PeerId, NodeIndex>;
type NextHopMap = DashMap<PeerId, (PeerId, i32)>;
// computed with SyncedRouteInfo. used to get next hop.
#[derive(Debug)]
struct RouteTable {
peer_infos: DashMap<PeerId, RoutePeerInfo>,
next_hop_map: DashMap<PeerId, (PeerId, i32)>,
next_hop_map: NextHopMap,
ipv4_peer_id_map: DashMap<Ipv4Addr, PeerId>,
cidr_peer_id_map: DashMap<cidr::IpCidr, PeerId>,
}
@@ -397,7 +409,121 @@ impl RouteTable {
.map(|x| NatType::try_from(x.udp_stun_info as i32).unwrap())
}
fn build_from_synced_info(&self, my_peer_id: PeerId, synced_info: &SyncedRouteInfo) {
// Build a directed graph with one node per known peer and one weighted edge
// per synced peer-to-peer connection; edge weights come from the pluggable
// cost calculator. Returns the graph plus a peer-id -> node-index map used
// by the next-hop generators below.
fn build_peer_graph_from_synced_info<T: RouteCostCalculatorInterface>(
peers: Vec<PeerId>,
synced_info: &SyncedRouteInfo,
cost_calc: &mut T,
) -> (PeerGraph, PeerIdToNodexIdxMap) {
let mut graph: PeerGraph = Graph::new();
let peer_id_to_node_index = PeerIdToNodexIdxMap::new();
// First pass: one node per peer.
for peer_id in peers.iter() {
peer_id_to_node_index.insert(*peer_id, graph.add_node(*peer_id));
}
// Second pass: edges for every advertised connection whose endpoint we know.
for peer_id in peers.iter() {
let connected_peers = synced_info
.get_connected_peers(*peer_id)
.unwrap_or(BTreeSet::new());
for dst_peer_id in connected_peers.iter() {
// Skip connections to peers we have no node for (not in `peers`).
let Some(dst_idx) = peer_id_to_node_index.get(dst_peer_id) else {
continue;
};
graph.add_edge(
*peer_id_to_node_index.get(&peer_id).unwrap(),
*dst_idx,
cost_calc.calculate_cost(*peer_id, *dst_peer_id),
);
}
}
(graph, peer_id_to_node_index)
}
// Compute the next-hop table under the "least hop" policy: shortest path by
// hop count, with the cost calculator only used to break ties among paths of
// equal (minimal) hop length.
fn gen_next_hop_map_with_least_hop<T: RouteCostCalculatorInterface>(
my_peer_id: PeerId,
graph: &PeerGraph,
idx_map: &PeerIdToNodexIdxMap,
cost_calc: &mut T,
) -> NextHopMap {
// Unit edge weights => dijkstra result is the hop count to each node.
let res = dijkstra(&graph, *idx_map.get(&my_peer_id).unwrap(), None, |_| 1);
let next_hop_map = NextHopMap::new();
for (node_idx, cost) in res.iter() {
// cost == 0 is ourselves; no next hop needed.
if *cost == 0 {
continue;
}
// Enumerate every simple path of exactly the minimal hop count
// (intermediate-node count is hops - 1).
// NOTE(review): all_simple_paths can be expensive on dense graphs —
// presumably acceptable at expected network sizes; confirm.
let all_paths = all_simple_paths::<Vec<_>, _>(
graph,
*idx_map.get(&my_peer_id).unwrap(),
*node_idx,
*cost - 1,
Some(*cost - 1),
)
.collect::<Vec<_>>();
// dijkstra said the node is reachable in `cost` hops, so at least one
// such path must exist.
assert!(!all_paths.is_empty());
// find a path with least cost.
let mut min_cost = i32::MAX;
let mut min_path = Vec::new();
for path in all_paths.iter() {
let mut cost = 0;
for i in 0..path.len() - 1 {
let src_peer_id = *graph.node_weight(path[i]).unwrap();
let dst_peer_id = *graph.node_weight(path[i + 1]).unwrap();
cost += cost_calc.calculate_cost(src_peer_id, dst_peer_id);
}
// `<=` keeps the last of equally-cheap paths.
if cost <= min_cost {
min_cost = cost;
min_path = path.clone();
}
}
// path[0] is ourselves, so path[1] is the next hop toward the target;
// the stored metric is the hop count from dijkstra.
next_hop_map.insert(
*graph.node_weight(*node_idx).unwrap(),
(*graph.node_weight(min_path[1]).unwrap(), *cost as i32),
);
}
next_hop_map
}
// Compute the next-hop table under the "least cost" policy: cheapest path by
// accumulated edge weight, regardless of hop count. Uses A* with a zero
// heuristic, which degenerates to Dijkstra per destination.
fn gen_next_hop_map_with_least_cost(
my_peer_id: PeerId,
graph: &PeerGraph,
idx_map: &PeerIdToNodexIdxMap,
) -> NextHopMap {
let next_hop_map = NextHopMap::new();
for item in idx_map.iter() {
// No next hop toward ourselves.
if *item.key() == my_peer_id {
continue;
}
let dst_peer_node_idx = *item.value();
// Unreachable destinations are simply omitted from the map.
let Some((cost, path)) = astar::astar(
graph,
*idx_map.get(&my_peer_id).unwrap(),
|node_idx| node_idx == dst_peer_node_idx,
|e| *e.weight(),
|_| 0,
) else {
continue;
};
// path[0] is ourselves; path[1] is the next hop. Stored metric here is
// the total path cost (unlike the hop count in the least-hop table).
next_hop_map.insert(*item.key(), (*graph.node_weight(path[1]).unwrap(), cost));
}
next_hop_map
}
fn build_from_synced_info<T: RouteCostCalculatorInterface>(
&self,
my_peer_id: PeerId,
synced_info: &SyncedRouteInfo,
policy: NextHopPolicy,
mut cost_calc: T,
) {
// build peer_infos
self.peer_infos.clear();
for item in synced_info.peer_infos.iter() {
@@ -411,31 +537,27 @@ impl RouteTable {
self.peer_infos.insert(*peer_id, info.clone());
}
if self.peer_infos.is_empty() {
return;
}
// build next hop map
self.next_hop_map.clear();
self.next_hop_map.insert(my_peer_id, (my_peer_id, 0));
for item in self.peer_infos.iter() {
let peer_id = *item.key();
if peer_id == my_peer_id {
continue;
}
let Some(path) = pathfinding::prelude::bfs(
&my_peer_id,
|p| {
synced_info
.get_connected_peers(*p)
.unwrap_or_else(|| BTreeSet::new())
},
|x| *x == peer_id,
) else {
continue;
};
if !path.is_empty() {
assert!(path.len() >= 2);
self.next_hop_map
.insert(peer_id, (path[1], (path.len() - 1) as i32));
}
let (graph, idx_map) = Self::build_peer_graph_from_synced_info(
self.peer_infos.iter().map(|x| *x.key()).collect(),
&synced_info,
&mut cost_calc,
);
let next_hop_map = if matches!(policy, NextHopPolicy::LeastHop) {
Self::gen_next_hop_map_with_least_hop(my_peer_id, &graph, &idx_map, &mut cost_calc)
} else {
Self::gen_next_hop_map_with_least_cost(my_peer_id, &graph, &idx_map)
};
for item in next_hop_map.iter() {
self.next_hop_map.insert(*item.key(), *item.value());
}
// build graph
// build ipv4_peer_id_map, cidr_peer_id_map
self.ipv4_peer_id_map.clear();
@@ -473,7 +595,8 @@ impl RouteTable {
}
type SessionId = u64;
type AtomicSessionId = AtomicU64;
type AtomicSessionId = atomic_shim::AtomicU64;
// if we need to sync route info with one peer, we create a SyncRouteSession with that peer.
#[derive(Debug)]
@@ -566,7 +689,9 @@ struct PeerRouteServiceImpl {
interface: Arc<Mutex<Option<RouteInterfaceBox>>>,
cost_calculator: Arc<std::sync::Mutex<Option<RouteCostCalculator>>>,
route_table: RouteTable,
route_table_with_cost: RouteTable,
synced_route_info: Arc<SyncedRouteInfo>,
cached_local_conn_map: std::sync::Mutex<RouteConnBitmap>,
}
@@ -588,9 +713,17 @@ impl PeerRouteServiceImpl {
PeerRouteServiceImpl {
my_peer_id,
global_ctx,
interface: Arc::new(Mutex::new(None)),
sessions: DashMap::new(),
interface: Arc::new(Mutex::new(None)),
cost_calculator: Arc::new(std::sync::Mutex::new(Some(Box::new(
DefaultRouteCostCalculator,
)))),
route_table: RouteTable::new(),
route_table_with_cost: RouteTable::new(),
synced_route_info: Arc::new(SyncedRouteInfo {
peer_infos: DashMap::new(),
conn_map: DashMap::new(),
@@ -652,8 +785,32 @@ impl PeerRouteServiceImpl {
}
fn update_route_table(&self) {
self.route_table
.build_from_synced_info(self.my_peer_id, &self.synced_route_info);
let mut calc_locked = self.cost_calculator.lock().unwrap();
calc_locked.as_mut().unwrap().begin_update();
self.route_table.build_from_synced_info(
self.my_peer_id,
&self.synced_route_info,
NextHopPolicy::LeastHop,
calc_locked.as_mut().unwrap(),
);
self.route_table_with_cost.build_from_synced_info(
self.my_peer_id,
&self.synced_route_info,
NextHopPolicy::LeastCost,
calc_locked.as_mut().unwrap(),
);
calc_locked.as_mut().unwrap().end_update();
}
fn cost_calculator_need_update(&self) -> bool {
self.cost_calculator
.lock()
.unwrap()
.as_ref()
.map(|x| x.need_update())
.unwrap_or(false)
}
fn update_route_table_and_cached_local_conn_bitmap(&self) {
@@ -1176,6 +1333,7 @@ impl PeerRoute {
session_mgr.maintain_sessions(service_impl).await;
}
#[tracing::instrument(skip(session_mgr))]
async fn update_my_peer_info_routine(
service_impl: Arc<PeerRouteServiceImpl>,
session_mgr: RouteSessionManager,
@@ -1186,6 +1344,11 @@ impl PeerRoute {
session_mgr.sync_now("update_my_infos");
}
if service_impl.cost_calculator_need_update() {
tracing::debug!("cost_calculator_need_update");
service_impl.update_route_table();
}
select! {
ev = global_event_receiver.recv() => {
tracing::info!(?ev, "global event received in update_my_peer_info_routine");
@@ -1237,6 +1400,19 @@ impl Route for PeerRoute {
route_table.get_next_hop(dst_peer_id).map(|x| x.0)
}
async fn get_next_hop_with_policy(
&self,
dst_peer_id: PeerId,
policy: NextHopPolicy,
) -> Option<PeerId> {
let route_table = if matches!(policy, NextHopPolicy::LeastCost) {
&self.service_impl.route_table_with_cost
} else {
&self.service_impl.route_table
};
route_table.get_next_hop(dst_peer_id).map(|x| x.0)
}
async fn list_routes(&self) -> Vec<crate::rpc::Route> {
let route_table = &self.service_impl.route_table;
let mut routes = Vec::new();
@@ -1268,6 +1444,11 @@ impl Route for PeerRoute {
tracing::info!(?ipv4_addr, "no peer id for ipv4");
None
}
async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {
*self.service_impl.cost_calculator.lock().unwrap() = Some(_cost_fn);
self.service_impl.update_route_table();
}
}
impl PeerPacketFilter for Arc<PeerRoute> {}
@@ -1285,10 +1466,11 @@ mod tests {
connector::udp_hole_punch::tests::replace_stun_info_collector,
peers::{
peer_manager::{PeerManager, RouteAlgoType},
route_trait::Route,
tests::{connect_peer_manager, wait_for_condition},
route_trait::{NextHopPolicy, Route, RouteCostCalculatorInterface},
tests::connect_peer_manager,
},
rpc::NatType,
tunnel::common::tests::wait_for_condition,
};
use super::PeerRoute;
@@ -1612,4 +1794,91 @@ mod tests {
println!("session: {:?}", r_a.session_mgr.dump_sessions());
check_rpc_counter(&r_a, p_b.my_peer_id(), 2, 2);
}
// Verifies that installing a custom cost calculator changes the LeastCost
// next hop (d -> c, the cheap link) while the LeastHop next hop still follows
// hop count (d -> b).
#[tokio::test]
async fn test_cost_calculator() {
// Topology: a-b, a-c, d-b, d-c, b-c (d reaches a via b or c).
let p_a = create_mock_pmgr().await;
let p_b = create_mock_pmgr().await;
let p_c = create_mock_pmgr().await;
let p_d = create_mock_pmgr().await;
connect_peer_manager(p_a.clone(), p_b.clone()).await;
connect_peer_manager(p_a.clone(), p_c.clone()).await;
connect_peer_manager(p_d.clone(), p_b.clone()).await;
connect_peer_manager(p_d.clone(), p_c.clone()).await;
connect_peer_manager(p_b.clone(), p_c.clone()).await;
let _r_a = create_mock_route(p_a.clone()).await;
let _r_b = create_mock_route(p_b.clone()).await;
let _r_c = create_mock_route(p_c.clone()).await;
let r_d = create_mock_route(p_d.clone()).await;
// in normal mode, packet from p_c should directly forward to p_a
wait_for_condition(
|| async { r_d.get_next_hop(p_a.my_peer_id()).await != None },
Duration::from_secs(5),
)
.await;
// Custom calculator: makes d->b and c->a expensive, d->c and b->a cheap,
// so the cheapest d->a route is d -> c -> b -> a.
struct TestCostCalculator {
p_a_peer_id: PeerId,
p_b_peer_id: PeerId,
p_c_peer_id: PeerId,
p_d_peer_id: PeerId,
}
impl RouteCostCalculatorInterface for TestCostCalculator {
fn calculate_cost(&self, src: PeerId, dst: PeerId) -> i32 {
if src == self.p_d_peer_id && dst == self.p_b_peer_id {
return 100;
}
if src == self.p_d_peer_id && dst == self.p_c_peer_id {
return 1;
}
if src == self.p_c_peer_id && dst == self.p_a_peer_id {
return 101;
}
if src == self.p_b_peer_id && dst == self.p_a_peer_id {
return 1;
}
if src == self.p_c_peer_id && dst == self.p_b_peer_id {
return 2;
}
// every other link has unit cost
1
}
}
r_d.set_route_cost_fn(Box::new(TestCostCalculator {
p_a_peer_id: p_a.my_peer_id(),
p_b_peer_id: p_b.my_peer_id(),
p_c_peer_id: p_c.my_peer_id(),
p_d_peer_id: p_d.my_peer_id(),
}))
.await;
// after set cost, packet from p_c should forward to p_b first
// LeastCost policy should pick the cheap first hop (c).
wait_for_condition(
|| async {
r_d.get_next_hop_with_policy(p_a.my_peer_id(), NextHopPolicy::LeastCost)
.await
== Some(p_c.my_peer_id())
},
Duration::from_secs(5),
)
.await;
// LeastHop policy ignores the cost function and still picks b (2 hops).
wait_for_condition(
|| async {
r_d.get_next_hop_with_policy(p_a.my_peer_id(), NextHopPolicy::LeastHop)
.await
== Some(p_b.my_peer_id())
},
Duration::from_secs(5),
)
.await;
}
}

View File

@@ -52,7 +52,7 @@ impl SyncPeerInfo {
.map(|x| x.to_string())
.chain(global_ctx.get_vpn_portal_cidr().map(|x| x.to_string()))
.collect(),
hostname: global_ctx.get_hostname(),
hostname: Some(global_ctx.get_hostname()),
udp_stun_info: global_ctx
.get_stun_info_collector()
.get_stun_info()
@@ -585,11 +585,7 @@ impl Route for BasicRoute {
route.next_hop_peer_id = route_info.peer_id;
route.cost = route_info.cost as i32;
route.proxy_cidrs = route_info.proxy_cidrs.clone();
route.hostname = if let Some(hostname) = &route_info.hostname {
hostname.clone()
} else {
"".to_string()
};
route.hostname = route_info.hostname.clone().unwrap_or_default();
let mut stun_info = StunInfo::default();
if let Ok(udp_nat_type) = NatType::try_from(route_info.udp_stun_info as i32) {

View File

@@ -1,5 +1,12 @@
use std::sync::{atomic::AtomicU32, Arc};
use std::{
sync::{
atomic::{AtomicBool, AtomicU32, Ordering},
Arc,
},
time::Instant,
};
use crossbeam::atomic::AtomicCell;
use dashmap::DashMap;
use futures::{SinkExt, StreamExt};
use prost::Message;
@@ -18,6 +25,8 @@ use crate::{
tunnel::packet_def::{PacketType, ZCPacket},
};
const RPC_PACKET_CONTENT_MTU: usize = 1300;
type PeerRpcServiceId = u32;
type PeerRpcTransactId = u32;
@@ -34,10 +43,13 @@ type PacketSender = UnboundedSender<ZCPacket>;
struct PeerRpcEndPoint {
peer_id: PeerId,
packet_sender: PacketSender,
create_time: AtomicCell<Instant>,
finished: Arc<AtomicBool>,
tasks: JoinSet<()>,
}
type PeerRpcEndPointCreator = Box<dyn Fn(PeerId) -> PeerRpcEndPoint + Send + Sync + 'static>;
type PeerRpcEndPointCreator =
Box<dyn Fn(PeerId, PeerRpcTransactId) -> PeerRpcEndPoint + Send + Sync + 'static>;
#[derive(Hash, Eq, PartialEq, Clone)]
struct PeerRpcClientCtxKey(PeerId, PeerRpcServiceId, PeerRpcTransactId);
@@ -48,8 +60,8 @@ pub struct PeerRpcManager {
tspt: Arc<Box<dyn PeerRpcManagerTransport>>,
service_registry: Arc<DashMap<PeerRpcServiceId, PeerRpcEndPointCreator>>,
peer_rpc_endpoints: Arc<DashMap<(PeerId, PeerRpcServiceId), PeerRpcEndPoint>>,
peer_rpc_endpoints: Arc<DashMap<PeerRpcClientCtxKey, PeerRpcEndPoint>>,
client_resp_receivers: Arc<DashMap<PeerRpcClientCtxKey, PacketSender>>,
transact_id: AtomicU32,
@@ -63,6 +75,95 @@ impl std::fmt::Debug for PeerRpcManager {
}
}
struct PacketMerger {
first_piece: Option<TaRpcPacket>,
pieces: Vec<TaRpcPacket>,
}
impl PacketMerger {
fn new() -> Self {
Self {
first_piece: None,
pieces: Vec::new(),
}
}
fn try_merge_pieces(&self) -> Option<TaRpcPacket> {
if self.first_piece.is_none() || self.pieces.is_empty() {
return None;
}
for p in &self.pieces {
// some piece is missing
if p.total_pieces == 0 {
return None;
}
}
// all pieces are received
let mut content = Vec::new();
for p in &self.pieces {
content.extend_from_slice(&p.content);
}
let mut tmpl_packet = self.first_piece.as_ref().unwrap().clone();
tmpl_packet.total_pieces = 1;
tmpl_packet.piece_idx = 0;
tmpl_packet.content = content;
Some(tmpl_packet)
}
fn feed(
&mut self,
packet: ZCPacket,
expected_tid: Option<PeerRpcTransactId>,
) -> Result<Option<TaRpcPacket>, Error> {
let payload = packet.payload();
let rpc_packet =
TaRpcPacket::decode(payload).map_err(|e| Error::MessageDecodeError(e.to_string()))?;
if expected_tid.is_some() && rpc_packet.transact_id != expected_tid.unwrap() {
return Ok(None);
}
let total_pieces = rpc_packet.total_pieces;
let piece_idx = rpc_packet.piece_idx;
// for compatibility with old version
if total_pieces == 0 && piece_idx == 0 {
return Ok(Some(rpc_packet));
}
if total_pieces > 100 || total_pieces == 0 {
return Err(Error::MessageDecodeError(format!(
"total_pieces is invalid: {}",
total_pieces
)));
}
if piece_idx >= total_pieces {
return Err(Error::MessageDecodeError(
"piece_idx >= total_pieces".to_owned(),
));
}
if self.first_piece.is_none()
|| self.first_piece.as_ref().unwrap().transact_id != rpc_packet.transact_id
|| self.first_piece.as_ref().unwrap().from_peer != rpc_packet.from_peer
{
self.first_piece = Some(rpc_packet.clone());
self.pieces.clear();
}
self.pieces
.resize(total_pieces as usize, Default::default());
self.pieces[piece_idx as usize] = rpc_packet;
Ok(self.try_merge_pieces())
}
}
impl PeerRpcManager {
pub fn new(tspt: impl PeerRpcManagerTransport) -> Self {
Self {
@@ -88,11 +189,12 @@ impl PeerRpcManager {
S::Fut: Send + 'static,
{
let tspt = self.tspt.clone();
let creator = Box::new(move |peer_id: PeerId| {
let creator = Box::new(move |peer_id: PeerId, transact_id: PeerRpcTransactId| {
let mut tasks = JoinSet::new();
let (packet_sender, mut packet_receiver) = mpsc::unbounded_channel();
let (mut client_transport, server_transport) = tarpc::transport::channel::unbounded();
let server = tarpc::server::BaseChannel::with_defaults(server_transport);
let finished = Arc::new(AtomicBool::new(false));
let my_peer_id_clone = tspt.my_peer_id();
let peer_id_clone = peer_id.clone();
@@ -101,18 +203,13 @@ impl PeerRpcManager {
tasks.spawn(o);
let tspt = tspt.clone();
let finished_clone = finished.clone();
tasks.spawn(async move {
let mut cur_req_peer_id = None;
let mut cur_transact_id = 0;
let mut packet_merger = PacketMerger::new();
loop {
tokio::select! {
Some(resp) = client_transport.next() => {
let Some(cur_req_peer_id) = cur_req_peer_id.take() else {
tracing::error!("[PEER RPC MGR] cur_req_peer_id is none, ignore this resp");
continue;
};
tracing::debug!(resp = ?resp, "server recv packet from service provider");
tracing::debug!(resp = ?resp, ?transact_id, ?peer_id, "server recv packet from service provider");
if resp.is_err() {
tracing::warn!(err = ?resp.err(),
"[PEER RPC MGR] client_transport in server side got channel error, ignore it.");
@@ -126,43 +223,43 @@ impl PeerRpcManager {
continue;
}
let msg = Self::build_rpc_packet(
let msgs = Self::build_rpc_packet(
tspt.my_peer_id(),
cur_req_peer_id,
peer_id,
service_id,
cur_transact_id,
transact_id,
false,
serialized_resp.unwrap(),
serialized_resp.as_ref().unwrap(),
);
if let Err(e) = tspt.send(msg, peer_id).await {
tracing::error!(error = ?e, peer_id = ?peer_id, service_id = ?service_id, "send resp to peer failed");
for msg in msgs {
if let Err(e) = tspt.send(msg, peer_id).await {
tracing::error!(error = ?e, peer_id = ?peer_id, service_id = ?service_id, "send resp to peer failed");
break;
}
}
finished_clone.store(true, Ordering::Relaxed);
}
Some(packet) = packet_receiver.recv() => {
let info = Self::parse_rpc_packet(&packet);
tracing::debug!(?info, "server recv packet from peer");
if let Err(e) = info {
tracing::error!(error = ?e, packet = ?packet, "parse rpc packet failed");
continue;
}
let info = info.unwrap();
tracing::trace!("recv packet from peer, packet: {:?}", packet);
if info.from_peer != peer_id {
tracing::warn!("recv packet from peer, but peer_id not match, ignore it");
continue;
}
if cur_req_peer_id.is_some() {
tracing::warn!("cur_req_peer_id is not none, ignore this packet");
continue;
}
let info = match packet_merger.feed(packet, None) {
Err(e) => {
tracing::error!(error = ?e, "feed packet to merger failed");
continue;
},
Ok(None) => {
continue;
},
Ok(Some(info)) => {
info
}
};
assert_eq!(info.service_id, service_id);
cur_req_peer_id = Some(info.from_peer);
cur_transact_id = info.transact_id;
tracing::trace!("recv packet from peer, packet: {:?}", packet);
assert_eq!(info.from_peer, peer_id);
assert_eq!(info.transact_id, transact_id);
let decoded_ret = postcard::from_bytes(&info.content.as_slice());
if let Err(e) = decoded_ret {
@@ -191,6 +288,8 @@ impl PeerRpcManager {
return PeerRpcEndPoint {
peer_id,
packet_sender,
create_time: AtomicCell::new(Instant::now()),
finished,
tasks,
};
// let resp = client_transport.next().await;
@@ -221,22 +320,41 @@ impl PeerRpcManager {
service_id: PeerRpcServiceId,
transact_id: PeerRpcTransactId,
is_req: bool,
content: Vec<u8>,
) -> ZCPacket {
let packet = TaRpcPacket {
from_peer,
to_peer,
service_id,
transact_id,
is_req,
content,
};
let mut buf = Vec::new();
packet.encode(&mut buf).unwrap();
content: &Vec<u8>,
) -> Vec<ZCPacket> {
let mut ret = Vec::new();
let content_mtu = RPC_PACKET_CONTENT_MTU;
let total_pieces = (content.len() + content_mtu - 1) / content_mtu;
let mut cur_offset = 0;
while cur_offset < content.len() {
let mut cur_len = content_mtu;
if cur_offset + cur_len > content.len() {
cur_len = content.len() - cur_offset;
}
let mut zc_packet = ZCPacket::new_with_payload(&buf);
zc_packet.fill_peer_manager_hdr(from_peer, to_peer, PacketType::TaRpc as u8);
zc_packet
let mut cur_content = Vec::new();
cur_content.extend_from_slice(&content[cur_offset..cur_offset + cur_len]);
let cur_packet = TaRpcPacket {
from_peer,
to_peer,
service_id,
transact_id,
is_req,
total_pieces: total_pieces as u32,
piece_idx: (cur_offset / content_mtu) as u32,
content: cur_content,
};
cur_offset += cur_len;
let mut buf = Vec::new();
cur_packet.encode(&mut buf).unwrap();
let mut zc_packet = ZCPacket::new_with_payload(&buf);
zc_packet.fill_peer_manager_hdr(from_peer, to_peer, PacketType::TaRpc as u8);
ret.push(zc_packet);
}
ret
}
pub fn run(&self) {
@@ -265,9 +383,16 @@ impl PeerRpcManager {
}
let endpoint = peer_rpc_endpoints
.entry((info.from_peer, info.service_id))
.entry(PeerRpcClientCtxKey(
info.from_peer,
info.service_id,
info.transact_id,
))
.or_insert_with(|| {
service_registry.get(&info.service_id).unwrap()(info.from_peer)
service_registry.get(&info.service_id).unwrap()(
info.from_peer,
info.transact_id,
)
});
endpoint.packet_sender.send(o).unwrap();
@@ -287,25 +412,46 @@ impl PeerRpcManager {
}
}
});
let peer_rpc_endpoints = self.peer_rpc_endpoints.clone();
tokio::spawn(async move {
loop {
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
peer_rpc_endpoints.retain(|_, v| {
v.create_time.load().elapsed().as_secs() < 30
&& !v.finished.load(Ordering::Relaxed)
});
}
});
}
#[tracing::instrument(skip(f))]
pub async fn do_client_rpc_scoped<CM, Req, RpcRet, Fut>(
pub async fn do_client_rpc_scoped<Resp, Req, RpcRet, Fut>(
&self,
service_id: PeerRpcServiceId,
dst_peer_id: PeerId,
f: impl FnOnce(UnboundedChannel<CM, Req>) -> Fut,
f: impl FnOnce(UnboundedChannel<Resp, Req>) -> Fut,
) -> RpcRet
where
CM: serde::Serialize + for<'a> serde::Deserialize<'a> + Send + Sync + 'static,
Req: serde::Serialize + for<'a> serde::Deserialize<'a> + Send + Sync + 'static,
Resp: serde::Serialize
+ for<'a> serde::Deserialize<'a>
+ Send
+ Sync
+ std::fmt::Debug
+ 'static,
Req: serde::Serialize
+ for<'a> serde::Deserialize<'a>
+ Send
+ Sync
+ std::fmt::Debug
+ 'static,
Fut: std::future::Future<Output = RpcRet>,
{
let mut tasks = JoinSet::new();
let (packet_sender, mut packet_receiver) = mpsc::unbounded_channel();
let (client_transport, server_transport) =
tarpc::transport::channel::unbounded::<CM, Req>();
tarpc::transport::channel::unbounded::<Resp, Req>();
let (mut server_s, mut server_r) = server_transport.split();
@@ -321,25 +467,28 @@ impl PeerRpcManager {
continue;
}
let a = postcard::to_allocvec(&a.unwrap());
if a.is_err() {
tracing::error!(error = ?a.err(), "bincode serialize failed");
let req = postcard::to_allocvec(&a.unwrap());
if req.is_err() {
tracing::error!(error = ?req.err(), "bincode serialize failed");
continue;
}
let packet = Self::build_rpc_packet(
let packets = Self::build_rpc_packet(
tspt.my_peer_id(),
dst_peer_id,
service_id,
transact_id,
true,
a.unwrap(),
req.as_ref().unwrap(),
);
tracing::debug!(?packet, "client send rpc packet to peer");
tracing::debug!(?packets, ?req, ?transact_id, "client send rpc packet to peer");
if let Err(e) = tspt.send(packet, dst_peer_id).await {
tracing::error!(error = ?e, dst_peer_id = ?dst_peer_id, "send to peer failed");
for packet in packets {
if let Err(e) = tspt.send(packet, dst_peer_id).await {
tracing::error!(error = ?e, dst_peer_id = ?dst_peer_id, "send to peer failed");
break;
}
}
}
@@ -347,17 +496,26 @@ impl PeerRpcManager {
});
tasks.spawn(async move {
let mut packet_merger = PacketMerger::new();
while let Some(packet) = packet_receiver.recv().await {
tracing::trace!("tunnel recv: {:?}", packet);
let info = Self::parse_rpc_packet(&packet);
if let Err(e) = info {
tracing::error!(error = ?e, "parse rpc packet failed");
continue;
}
tracing::debug!(?info, "client recv rpc packet from peer");
let info = match packet_merger.feed(packet, Some(transact_id)) {
Err(e) => {
tracing::error!(error = ?e, "feed packet to merger failed");
continue;
}
Ok(None) => {
continue;
}
Ok(Some(info)) => info,
};
let decoded = postcard::from_bytes(&info.content.as_slice());
tracing::debug!(?info, ?decoded, "client recv rpc packet from peer");
assert_eq!(info.transact_id, transact_id);
let decoded = postcard::from_bytes(&info.unwrap().content.as_slice());
if let Err(e) = decoded {
tracing::error!(error = ?e, "decode rpc packet failed");
continue;
@@ -390,7 +548,7 @@ impl PeerRpcManager {
#[cfg(test)]
pub mod tests {
use std::{pin::Pin, sync::Arc};
use std::{pin::Pin, sync::Arc, time::Duration};
use futures::{SinkExt, StreamExt};
use tokio::sync::Mutex;
@@ -402,8 +560,8 @@ pub mod tests {
tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear},
},
tunnel::{
packet_def::ZCPacket, ring::create_ring_tunnel_pair, Tunnel, ZCPacketSink,
ZCPacketStream,
common::tests::wait_for_condition, packet_def::ZCPacket, ring::create_ring_tunnel_pair,
Tunnel, ZCPacketSink, ZCPacketStream,
},
};
@@ -426,6 +584,17 @@ pub mod tests {
}
}
fn random_string(len: usize) -> String {
use rand::distributions::Alphanumeric;
use rand::Rng;
let mut rng = rand::thread_rng();
let s: Vec<u8> = std::iter::repeat(())
.map(|()| rng.sample(Alphanumeric))
.take(len)
.collect();
String::from_utf8(s).unwrap()
}
#[tokio::test]
async fn peer_rpc_basic_test() {
struct MockTransport {
@@ -473,16 +642,35 @@ pub mod tests {
});
client_rpc_mgr.run();
let msg = random_string(8192);
let ret = client_rpc_mgr
.do_client_rpc_scoped(1, server_rpc_mgr.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), "abc".to_owned()).await;
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
println!("ret: {:?}", ret);
assert_eq!(ret.unwrap(), "hello abc");
assert_eq!(ret.unwrap(), format!("hello {}", msg));
let msg = random_string(10);
let ret = client_rpc_mgr
.do_client_rpc_scoped(1, server_rpc_mgr.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
println!("ret: {:?}", ret);
assert_eq!(ret.unwrap(), format!("hello {}", msg));
wait_for_condition(
|| async { server_rpc_mgr.peer_rpc_endpoints.is_empty() },
Duration::from_secs(10),
)
.await;
}
#[tokio::test]
@@ -516,39 +704,42 @@ pub mod tests {
};
peer_mgr_b.get_peer_rpc_mgr().run_service(1, s.serve());
let msg = random_string(16 * 1024);
let ip_list = peer_mgr_a
.get_peer_rpc_mgr()
.do_client_rpc_scoped(1, peer_mgr_b.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), "abc".to_owned()).await;
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
println!("ip_list: {:?}", ip_list);
assert_eq!(ip_list.as_ref().unwrap(), "hello abc");
assert_eq!(ip_list.unwrap(), format!("hello {}", msg));
// call again
let msg = random_string(16 * 1024);
let ip_list = peer_mgr_a
.get_peer_rpc_mgr()
.do_client_rpc_scoped(1, peer_mgr_b.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), "abcd".to_owned()).await;
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
println!("ip_list: {:?}", ip_list);
assert_eq!(ip_list.as_ref().unwrap(), "hello abcd");
assert_eq!(ip_list.unwrap(), format!("hello {}", msg));
let msg = random_string(16 * 1024);
let ip_list = peer_mgr_c
.get_peer_rpc_mgr()
.do_client_rpc_scoped(1, peer_mgr_b.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), "bcd".to_owned()).await;
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
println!("ip_list: {:?}", ip_list);
assert_eq!(ip_list.as_ref().unwrap(), "hello bcd");
assert_eq!(ip_list.unwrap(), format!("hello {}", msg));
}
#[tokio::test]
@@ -575,26 +766,33 @@ pub mod tests {
};
peer_mgr_b.get_peer_rpc_mgr().run_service(2, b.serve());
let msg = random_string(16 * 1024);
let ip_list = peer_mgr_a
.get_peer_rpc_mgr()
.do_client_rpc_scoped(1, peer_mgr_b.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), "abc".to_owned()).await;
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
assert_eq!(ip_list.unwrap(), format!("hello_a {}", msg));
assert_eq!(ip_list.as_ref().unwrap(), "hello_a abc");
let msg = random_string(16 * 1024);
let ip_list = peer_mgr_a
.get_peer_rpc_mgr()
.do_client_rpc_scoped(2, peer_mgr_b.my_peer_id(), |c| async {
let c = TestRpcServiceClient::new(tarpc::client::Config::default(), c).spawn();
let ret = c.hello(tarpc::context::current(), "abc".to_owned()).await;
let ret = c.hello(tarpc::context::current(), msg.clone()).await;
ret
})
.await;
assert_eq!(ip_list.as_ref().unwrap(), "hello_b abc");
assert_eq!(ip_list.unwrap(), format!("hello_b {}", msg));
wait_for_condition(
|| async { peer_mgr_b.get_peer_rpc_mgr().peer_rpc_endpoints.is_empty() },
Duration::from_secs(10),
)
.await;
}
}

View File

@@ -5,6 +5,18 @@ use tokio_util::bytes::Bytes;
use crate::common::{error::Error, PeerId};
#[derive(Clone, Debug)]
pub enum NextHopPolicy {
LeastHop,
LeastCost,
}
impl Default for NextHopPolicy {
fn default() -> Self {
NextHopPolicy::LeastHop
}
}
#[async_trait]
pub trait RouteInterface {
async fn list_peers(&self) -> Vec<PeerId>;
@@ -19,6 +31,31 @@ pub trait RouteInterface {
pub type RouteInterfaceBox = Box<dyn RouteInterface + Send + Sync>;
#[auto_impl::auto_impl(Box , &mut)]
pub trait RouteCostCalculatorInterface: Send + Sync {
fn begin_update(&mut self) {}
fn end_update(&mut self) {}
fn calculate_cost(&self, _src: PeerId, _dst: PeerId) -> i32 {
1
}
fn need_update(&self) -> bool {
false
}
fn dump(&self) -> String {
"All routes have cost 1".to_string()
}
}
#[derive(Clone, Debug, Default)]
pub struct DefaultRouteCostCalculator;
impl RouteCostCalculatorInterface for DefaultRouteCostCalculator {}
pub type RouteCostCalculator = Box<dyn RouteCostCalculatorInterface>;
#[async_trait]
#[auto_impl::auto_impl(Box, Arc)]
pub trait Route {
@@ -26,11 +63,21 @@ pub trait Route {
async fn close(&self);
async fn get_next_hop(&self, peer_id: PeerId) -> Option<PeerId>;
async fn get_next_hop_with_policy(
&self,
peer_id: PeerId,
_policy: NextHopPolicy,
) -> Option<PeerId> {
self.get_next_hop(peer_id).await
}
async fn list_routes(&self) -> Vec<crate::rpc::Route>;
async fn get_peer_id_by_ipv4(&self, _ipv4: &Ipv4Addr) -> Option<PeerId> {
None
}
async fn set_route_cost_fn(&self, _cost_fn: RouteCostCalculator) {}
}
pub type ArcRoute = Arc<Box<dyn Route + Send + Sync>>;

View File

@@ -1,7 +1,5 @@
use std::sync::Arc;
use futures::Future;
use crate::{
common::{error::Error, global_ctx::tests::get_mock_global_ctx, PeerId},
tunnel::ring::create_ring_tunnel_pair,
@@ -58,18 +56,3 @@ pub async fn wait_route_appear(
wait_route_appear_with_cost(peer_mgr.clone(), target_peer.my_peer_id(), None).await?;
wait_route_appear_with_cost(target_peer, peer_mgr.my_peer_id(), None).await
}
pub async fn wait_for_condition<F, FRet>(mut condition: F, timeout: std::time::Duration) -> ()
where
F: FnMut() -> FRet + Send,
FRet: Future<Output = bool>,
{
let now = std::time::Instant::now();
while now.elapsed() < timeout {
if condition().await {
return;
}
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
}
assert!(condition().await, "Timeout")
}

View File

@@ -9,17 +9,18 @@ use super::*;
use crate::{
common::{
config::{ConfigLoader, NetworkIdentity, TomlConfigLoader, VpnPortalConfig},
config::{ConfigLoader, NetworkIdentity, TomlConfigLoader},
netns::{NetNS, ROOT_NETNS_NAME},
},
instance::instance::Instance,
peers::tests::wait_for_condition,
tunnel::{
ring::RingTunnelConnector,
tcp::TcpTunnelConnector,
udp::UdpTunnelConnector,
wireguard::{WgConfig, WgTunnelConnector},
},
tunnel::common::tests::wait_for_condition,
tunnel::{ring::RingTunnelConnector, tcp::TcpTunnelConnector, udp::UdpTunnelConnector},
};
#[cfg(feature = "wireguard")]
use crate::{
common::config::VpnPortalConfig,
tunnel::wireguard::{WgConfig, WgTunnelConnector},
vpn_portal::wireguard::get_wg_config_for_portal,
};
@@ -47,11 +48,13 @@ pub fn get_inst_config(inst_name: &str, ns: Option<&str>, ipv4: &str) -> TomlCon
let config = TomlConfigLoader::default();
config.set_inst_name(inst_name.to_owned());
config.set_netns(ns.map(|s| s.to_owned()));
config.set_ipv4(ipv4.parse().unwrap());
config.set_ipv4(Some(ipv4.parse().unwrap()));
config.set_listeners(vec![
"tcp://0.0.0.0:11010".parse().unwrap(),
"udp://0.0.0.0:11010".parse().unwrap(),
"wg://0.0.0.0:11011".parse().unwrap(),
"ws://0.0.0.0:11011".parse().unwrap(),
"wss://0.0.0.0:11012".parse().unwrap(),
]);
config
}
@@ -81,6 +84,7 @@ pub async fn init_three_node(proto: &str) -> Vec<Instance> {
"udp://10.1.1.1:11010".parse().unwrap(),
));
} else if proto == "wg" {
#[cfg(feature = "wireguard")]
inst2
.get_conn_manager()
.add_connector(WgTunnelConnector::new(
@@ -94,6 +98,20 @@ pub async fn init_three_node(proto: &str) -> Vec<Instance> {
.unwrap_or_default(),
),
));
} else if proto == "ws" {
#[cfg(feature = "websocket")]
inst2
.get_conn_manager()
.add_connector(crate::tunnel::websocket::WSTunnelConnector::new(
"ws://10.1.1.1:11011".parse().unwrap(),
));
} else if proto == "wss" {
#[cfg(feature = "websocket")]
inst2
.get_conn_manager()
.add_connector(crate::tunnel::websocket::WSTunnelConnector::new(
"wss://10.1.1.1:11012".parse().unwrap(),
));
}
inst2
@@ -103,16 +121,17 @@ pub async fn init_three_node(proto: &str) -> Vec<Instance> {
));
// wait inst2 have two route.
let now = std::time::Instant::now();
loop {
if inst2.get_peer_manager().list_routes().await.len() == 2 {
break;
}
if now.elapsed().as_secs() > 5 {
panic!("wait inst2 have two route timeout");
}
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
}
wait_for_condition(
|| async { inst2.get_peer_manager().list_routes().await.len() == 2 },
Duration::from_secs(5000),
)
.await;
wait_for_condition(
|| async { inst1.get_peer_manager().list_routes().await.len() == 2 },
Duration::from_secs(5000),
)
.await;
vec![inst1, inst2, inst3]
}
@@ -140,7 +159,7 @@ async fn ping_test(from_netns: &str, target_ip: &str) -> bool {
#[rstest::rstest]
#[tokio::test]
#[serial_test::serial]
pub async fn basic_three_node_test(#[values("tcp", "udp", "wg")] proto: &str) {
pub async fn basic_three_node_test(#[values("tcp", "udp", "wg", "ws", "wss")] proto: &str) {
let insts = init_three_node(proto).await;
check_route(
@@ -168,12 +187,13 @@ pub async fn basic_three_node_test(#[values("tcp", "udp", "wg")] proto: &str) {
pub async fn tcp_proxy_three_node_test(#[values("tcp", "udp", "wg")] proto: &str) {
use crate::tunnel::{common::tests::_tunnel_pingpong_netns, tcp::TcpTunnelListener};
let insts = init_three_node(proto).await;
let mut insts = init_three_node(proto).await;
insts[2]
.get_global_ctx()
.add_proxy_cidr("10.1.2.0/24".parse().unwrap())
.unwrap();
insts[2].run_ip_proxy().await.unwrap();
assert_eq!(insts[2].get_global_ctx().get_proxy_cidrs().len(), 1);
wait_proxy_route_appear(
@@ -203,12 +223,13 @@ pub async fn tcp_proxy_three_node_test(#[values("tcp", "udp", "wg")] proto: &str
#[tokio::test]
#[serial_test::serial]
pub async fn icmp_proxy_three_node_test(#[values("tcp", "udp", "wg")] proto: &str) {
let insts = init_three_node(proto).await;
let mut insts = init_three_node(proto).await;
insts[2]
.get_global_ctx()
.add_proxy_cidr("10.1.2.0/24".parse().unwrap())
.unwrap();
insts[2].run_ip_proxy().await.unwrap();
assert_eq!(insts[2].get_global_ctx().get_proxy_cidrs().len(), 1);
wait_proxy_route_appear(
@@ -226,6 +247,7 @@ pub async fn icmp_proxy_three_node_test(#[values("tcp", "udp", "wg")] proto: &st
.await;
}
#[cfg(feature = "wireguard")]
#[rstest::rstest]
#[tokio::test]
#[serial_test::serial]
@@ -298,12 +320,13 @@ pub async fn proxy_three_node_disconnect_test(#[values("tcp", "wg")] proto: &str
pub async fn udp_proxy_three_node_test(#[values("tcp", "udp", "wg")] proto: &str) {
use crate::tunnel::{common::tests::_tunnel_pingpong_netns, udp::UdpTunnelListener};
let insts = init_three_node(proto).await;
let mut insts = init_three_node(proto).await;
insts[2]
.get_global_ctx()
.add_proxy_cidr("10.1.2.0/24".parse().unwrap())
.unwrap();
insts[2].run_ip_proxy().await.unwrap();
assert_eq!(insts[2].get_global_ctx().get_proxy_cidrs().len(), 1);
wait_proxy_route_appear(
@@ -478,6 +501,7 @@ fn run_wireguard_client(
Ok(())
}
#[cfg(feature = "wireguard")]
#[tokio::test]
#[serial_test::serial]
pub async fn wireguard_vpn_portal() {

View File

@@ -146,7 +146,7 @@ where
reserve_buf(
&mut self_mut.buf,
*self_mut.max_packet_size,
*self_mut.max_packet_size * 64,
*self_mut.max_packet_size * 32,
);
let cap = self_mut.buf.capacity() - self_mut.buf.len();
@@ -419,7 +419,7 @@ pub fn reserve_buf(buf: &mut BytesMut, min_size: usize, max_size: usize) {
pub mod tests {
use std::time::Instant;
use futures::{SinkExt, StreamExt, TryStreamExt};
use futures::{Future, SinkExt, StreamExt, TryStreamExt};
use tokio_util::bytes::{BufMut, Bytes, BytesMut};
use crate::{
@@ -431,7 +431,14 @@ pub mod tests {
let (mut recv, mut send) = tunnel.split();
if !once {
recv.forward(send).await.unwrap();
while let Some(item) = recv.next().await {
let Ok(msg) = item else {
continue;
};
if let Err(_) = send.send(msg).await {
break;
}
}
} else {
let Some(ret) = recv.next().await else {
assert!(false, "recv error");
@@ -447,6 +454,8 @@ pub mod tests {
tracing::debug!(?res, "recv a msg, try echo back");
send.send(res).await.unwrap();
}
let _ = send.flush().await;
let _ = send.close().await;
tracing::warn!("echo server exit...");
}
@@ -506,7 +515,7 @@ pub mod tests {
println!("echo back: {:?}", ret);
assert_eq!(ret.payload(), Bytes::from("12345678abcdefg"));
drop(send);
send.close().await.unwrap();
if ["udp", "wg"].contains(&connector.remote_url().scheme()) {
lis.abort();
@@ -562,6 +571,7 @@ pub mod tests {
let _ = send.feed(item).await.unwrap();
}
send.close().await.unwrap();
drop(send);
drop(connector);
drop(tunnel);
@@ -576,7 +586,7 @@ pub mod tests {
pub fn enable_log() {
let filter = tracing_subscriber::EnvFilter::builder()
.with_default_directive(tracing::level_filters::LevelFilter::TRACE.into())
.with_default_directive(tracing::level_filters::LevelFilter::DEBUG.into())
.from_env()
.unwrap()
.add_directive("tarpc=error".parse().unwrap());
@@ -585,4 +595,19 @@ pub mod tests {
.with_env_filter(filter)
.init();
}
pub async fn wait_for_condition<F, FRet>(mut condition: F, timeout: std::time::Duration) -> ()
where
F: FnMut() -> FRet + Send,
FRet: Future<Output = bool>,
{
let now = std::time::Instant::now();
while now.elapsed() < timeout {
if condition().await {
return;
}
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
}
assert!(condition().await, "Timeout")
}
}

View File

@@ -0,0 +1,86 @@
use std::sync::Arc;
use rustls::pki_types::{CertificateDer, PrivateKeyDer, ServerName, UnixTime};
/// Dummy certificate verifier that treats any certificate as valid.
/// NOTE, such verification is vulnerable to MITM attacks, but convenient for testing.
#[derive(Debug)]
struct SkipServerVerification(Arc<rustls::crypto::CryptoProvider>);
impl SkipServerVerification {
fn new(provider: Arc<rustls::crypto::CryptoProvider>) -> Arc<Self> {
Arc::new(Self(provider))
}
}
impl rustls::client::danger::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &CertificateDer<'_>,
_intermediates: &[CertificateDer<'_>],
_server_name: &ServerName<'_>,
_ocsp: &[u8],
_now: UnixTime,
) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
Ok(rustls::client::danger::ServerCertVerified::assertion())
}
fn verify_tls12_signature(
&self,
message: &[u8],
cert: &CertificateDer<'_>,
dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
rustls::crypto::verify_tls12_signature(
message,
cert,
dss,
&self.0.signature_verification_algorithms,
)
}
fn verify_tls13_signature(
&self,
message: &[u8],
cert: &CertificateDer<'_>,
dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
rustls::crypto::verify_tls13_signature(
message,
cert,
dss,
&self.0.signature_verification_algorithms,
)
}
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
self.0.signature_verification_algorithms.supported_schemes()
}
}
pub fn init_crypto_provider() {
let _ =
rustls::crypto::CryptoProvider::install_default(rustls::crypto::ring::default_provider());
}
pub fn get_insecure_tls_client_config() -> rustls::ClientConfig {
init_crypto_provider();
let provider = rustls::crypto::CryptoProvider::get_default().unwrap();
let mut config = rustls::ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(SkipServerVerification::new(provider.clone()))
.with_no_client_auth();
config.enable_sni = false;
config.enable_early_data = false;
config
}
pub fn get_insecure_tls_cert<'a>() -> (Vec<CertificateDer<'a>>, PrivateKeyDer<'a>) {
let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap();
let cert_der = cert.serialize_der().unwrap();
let priv_key = cert.serialize_private_key_der();
let priv_key = rustls::pki_types::PrivatePkcs8KeyDer::from(priv_key);
let cert_chain = vec![cert_der.clone().into()];
(cert_chain, priv_key.into())
}

View File

@@ -17,13 +17,23 @@ pub mod common;
pub mod filter;
pub mod mpsc;
pub mod packet_def;
pub mod quic;
pub mod ring;
pub mod stats;
pub mod tcp;
pub mod udp;
#[cfg(feature = "wireguard")]
pub mod wireguard;
#[cfg(feature = "quic")]
pub mod quic;
#[cfg(feature = "websocket")]
pub mod websocket;
#[cfg(any(feature = "quic", feature = "websocket"))]
pub mod insecure_tls;
#[derive(thiserror::Error, Debug)]
pub enum TunnelError {
#[error("io error")]
@@ -58,6 +68,10 @@ pub enum TunnelError {
#[error("no dns record found")]
NoDnsRecordFound(IpVersion),
#[cfg(feature = "websocket")]
#[error("websocket error: {0}")]
WebSocketError(#[from] tokio_websockets::Error),
#[error("tunnel error: {0}")]
TunError(String),
}

View File

@@ -1,9 +1,9 @@
// this mod wrap tunnel to a mpsc tunnel, based on crossbeam_channel
use std::pin::Pin;
use std::{pin::Pin, time::Duration};
use anyhow::Context;
use tokio::task::JoinHandle;
use tokio::{task::JoinHandle, time::timeout};
use super::{packet_def::ZCPacket, Tunnel, TunnelError, ZCPacketSink, ZCPacketStream};
@@ -42,6 +42,8 @@ impl<T: Tunnel> MpscTunnel<T> {
break;
}
}
let close_ret = timeout(Duration::from_secs(5), sink.close()).await;
tracing::warn!(?close_ret, "mpsc close sink");
});
Self {

View File

@@ -59,6 +59,10 @@ pub enum PacketType {
bitflags::bitflags! {
struct PeerManagerHeaderFlags: u8 {
const ENCRYPTED = 0b0000_0001;
const LATENCY_FIRST = 0b0000_0010;
const EXIT_NODE = 0b0000_0100;
const _ = !0;
}
}
@@ -69,7 +73,8 @@ pub struct PeerManagerHeader {
pub to_peer_id: U32<DefaultEndian>,
pub packet_type: u8,
pub flags: u8,
reserved: U16<DefaultEndian>,
pub forward_counter: u8,
reserved: u8,
pub len: U32<DefaultEndian>,
}
pub const PEER_MANAGER_HEADER_SIZE: usize = std::mem::size_of::<PeerManagerHeader>();
@@ -90,6 +95,40 @@ impl PeerManagerHeader {
}
self.flags = flags.bits();
}
pub fn is_latency_first(&self) -> bool {
PeerManagerHeaderFlags::from_bits(self.flags)
.unwrap()
.contains(PeerManagerHeaderFlags::LATENCY_FIRST)
}
pub fn is_exit_node(&self) -> bool {
PeerManagerHeaderFlags::from_bits(self.flags)
.unwrap()
.contains(PeerManagerHeaderFlags::EXIT_NODE)
}
pub fn set_latency_first(&mut self, latency_first: bool) -> &mut Self {
let mut flags = PeerManagerHeaderFlags::from_bits(self.flags).unwrap();
if latency_first {
flags.insert(PeerManagerHeaderFlags::LATENCY_FIRST);
} else {
flags.remove(PeerManagerHeaderFlags::LATENCY_FIRST);
}
self.flags = flags.bits();
self
}
pub fn set_exit_node(&mut self, exit_node: bool) -> &mut Self {
let mut flags = PeerManagerHeaderFlags::from_bits(self.flags).unwrap();
if exit_node {
flags.insert(PeerManagerHeaderFlags::EXIT_NODE);
} else {
flags.remove(PeerManagerHeaderFlags::EXIT_NODE);
}
self.flags = flags.bits();
self
}
}
// reserve the space for aes tag and nonce
@@ -114,6 +153,7 @@ pub struct ZCPacketOffsets {
pub tcp_tunnel_header_offset: usize,
pub udp_tunnel_header_offset: usize,
pub wg_tunnel_header_offset: usize,
pub dummy_tunnel_header_offset: usize,
}
#[derive(Debug, Clone, Copy, PartialEq)]
@@ -126,6 +166,8 @@ pub enum ZCPacketType {
WG,
// received from local tun device, should reserve header space for tcp or udp tunnel
NIC,
// tunnel without header
DummyTunnel,
}
const PAYLOAD_OFFSET_FOR_NIC_PACKET: usize = max(
@@ -133,6 +175,9 @@ const PAYLOAD_OFFSET_FOR_NIC_PACKET: usize = max(
WG_TUNNEL_HEADER_SIZE,
) + PEER_MANAGER_HEADER_SIZE;
// UDP Tunnel: TUN MTU + 24 (Easy) + 20 (Encrypted) + 8(UDP) + 20(IP) = TUN MTU + 72
// TCP Tunnel: TUN MTU + 20 (Easy) + 20 (Encrypted) + 20(TCP) + 20(IP) = TUN MTU + 80
const INVALID_OFFSET: usize = usize::MAX;
const fn get_converted_offset(old_hdr_size: usize, new_hdr_size: usize) -> usize {
@@ -158,6 +203,7 @@ impl ZCPacketType {
TCP_TUNNEL_HEADER_SIZE,
WG_TUNNEL_HEADER_SIZE,
),
dummy_tunnel_header_offset: get_converted_offset(TCP_TUNNEL_HEADER_SIZE, 0),
},
ZCPacketType::UDP => ZCPacketOffsets {
payload_offset: UDP_TUNNEL_HEADER_SIZE + PEER_MANAGER_HEADER_SIZE,
@@ -171,6 +217,7 @@ impl ZCPacketType {
UDP_TUNNEL_HEADER_SIZE,
WG_TUNNEL_HEADER_SIZE,
),
dummy_tunnel_header_offset: get_converted_offset(UDP_TUNNEL_HEADER_SIZE, 0),
},
ZCPacketType::WG => ZCPacketOffsets {
payload_offset: WG_TUNNEL_HEADER_SIZE + PEER_MANAGER_HEADER_SIZE,
@@ -184,6 +231,7 @@ impl ZCPacketType {
UDP_TUNNEL_HEADER_SIZE,
),
wg_tunnel_header_offset: 0,
dummy_tunnel_header_offset: get_converted_offset(WG_TUNNEL_HEADER_SIZE, 0),
},
ZCPacketType::NIC => ZCPacketOffsets {
payload_offset: PAYLOAD_OFFSET_FOR_NIC_PACKET,
@@ -198,6 +246,16 @@ impl ZCPacketType {
wg_tunnel_header_offset: PAYLOAD_OFFSET_FOR_NIC_PACKET
- PEER_MANAGER_HEADER_SIZE
- WG_TUNNEL_HEADER_SIZE,
dummy_tunnel_header_offset: PAYLOAD_OFFSET_FOR_NIC_PACKET
- PEER_MANAGER_HEADER_SIZE,
},
ZCPacketType::DummyTunnel => ZCPacketOffsets {
payload_offset: PEER_MANAGER_HEADER_SIZE,
peer_manager_header_offset: 0,
tcp_tunnel_header_offset: get_converted_offset(0, TCP_TUNNEL_HEADER_SIZE),
udp_tunnel_header_offset: get_converted_offset(0, UDP_TUNNEL_HEADER_SIZE),
wg_tunnel_header_offset: get_converted_offset(0, WG_TUNNEL_HEADER_SIZE),
dummy_tunnel_header_offset: 0,
},
}
}
@@ -346,16 +404,25 @@ impl ZCPacket {
hdr.to_peer_id.set(to_peer_id);
hdr.packet_type = packet_type;
hdr.flags = 0;
hdr.forward_counter = 1;
hdr.len.set(payload_len as u32);
}
fn tunnel_payload(&self) -> &[u8] {
pub fn tunnel_payload(&self) -> &[u8] {
&self.inner[self
.packet_type
.get_packet_offsets()
.peer_manager_header_offset..]
}
pub fn tunnel_payload_bytes(mut self) -> BytesMut {
self.inner.split_off(
self.packet_type
.get_packet_offsets()
.peer_manager_header_offset,
)
}
pub fn convert_type(mut self, target_packet_type: ZCPacketType) -> Self {
if target_packet_type == self.packet_type {
return self;
@@ -377,6 +444,11 @@ impl ZCPacket {
.get_packet_offsets()
.wg_tunnel_header_offset
}
ZCPacketType::DummyTunnel => {
self.packet_type
.get_packet_offsets()
.dummy_tunnel_header_offset
}
ZCPacketType::NIC => unreachable!(),
};

View File

@@ -12,44 +12,18 @@ use crate::{
},
};
use anyhow::Context;
use quinn::{ClientConfig, Connection, Endpoint, ServerConfig};
use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Connection, Endpoint, ServerConfig};
use super::{
check_scheme_and_get_socket_addr, IpVersion, Tunnel, TunnelConnector, TunnelError,
TunnelListener,
check_scheme_and_get_socket_addr,
insecure_tls::{get_insecure_tls_cert, get_insecure_tls_client_config},
IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelListener,
};
/// Dummy certificate verifier that treats any certificate as valid.
/// NOTE, such verification is vulnerable to MITM attacks, but convenient for testing.
struct SkipServerVerification;
impl SkipServerVerification {
fn new() -> Arc<Self> {
Arc::new(Self)
}
}
impl rustls::client::ServerCertVerifier for SkipServerVerification {
fn verify_server_cert(
&self,
_end_entity: &rustls::Certificate,
_intermediates: &[rustls::Certificate],
_server_name: &rustls::ServerName,
_scts: &mut dyn Iterator<Item = &[u8]>,
_ocsp_response: &[u8],
_now: std::time::SystemTime,
) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
Ok(rustls::client::ServerCertVerified::assertion())
}
}
fn configure_client() -> ClientConfig {
let crypto = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_custom_certificate_verifier(SkipServerVerification::new())
.with_no_client_auth();
ClientConfig::new(Arc::new(crypto))
ClientConfig::new(Arc::new(
QuicClientConfig::try_from(get_insecure_tls_client_config()).unwrap(),
))
}
/// Constructs a QUIC endpoint configured to listen for incoming connections on a certain address
@@ -68,18 +42,14 @@ pub fn make_server_endpoint(bind_addr: SocketAddr) -> Result<(Endpoint, Vec<u8>)
/// Returns default server configuration along with its certificate.
fn configure_server() -> Result<(ServerConfig, Vec<u8>), Box<dyn Error>> {
let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap();
let cert_der = cert.serialize_der().unwrap();
let priv_key = cert.serialize_private_key_der();
let priv_key = rustls::PrivateKey(priv_key);
let cert_chain = vec![rustls::Certificate(cert_der.clone())];
let (certs, key) = get_insecure_tls_cert();
let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key)?;
let mut server_config = ServerConfig::with_single_cert(certs.clone(), key.into())?;
let transport_config = Arc::get_mut(&mut server_config.transport).unwrap();
transport_config.max_concurrent_uni_streams(10_u8.into());
transport_config.max_concurrent_bidi_streams(10_u8.into());
Ok((server_config, cert_der))
Ok((server_config, certs[0].to_vec()))
}
#[allow(unused)]
@@ -148,7 +118,7 @@ impl TunnelListener for QUICTunnelListener {
};
Ok(Box::new(TunnelWrapper::new(
FramedReader::new_with_associate_data(r, 4500, Some(Box::new(arc_conn.clone()))),
FramedReader::new_with_associate_data(r, 2000, Some(Box::new(arc_conn.clone()))),
FramedWriter::new_with_associate_data(w, Some(Box::new(arc_conn))),
Some(info),
)))

View File

@@ -12,7 +12,7 @@ use super::{
IpVersion, Tunnel, TunnelError, TunnelListener,
};
const TCP_MTU_BYTES: usize = 64 * 1024;
const TCP_MTU_BYTES: usize = 2000;
#[derive(Debug)]
pub struct TcpTunnelListener {

View File

@@ -33,10 +33,10 @@ use super::{
IpVersion, Tunnel, TunnelConnCounter, TunnelError, TunnelListener, TunnelUrl,
};
pub const UDP_DATA_MTU: usize = 65000;
pub const UDP_DATA_MTU: usize = 2000;
type UdpCloseEventSender = UnboundedSender<Option<TunnelError>>;
type UdpCloseEventReceiver = UnboundedReceiver<Option<TunnelError>>;
type UdpCloseEventSender = UnboundedSender<(SocketAddr, Option<TunnelError>)>;
type UdpCloseEventReceiver = UnboundedReceiver<(SocketAddr, Option<TunnelError>)>;
fn new_udp_packet<F>(f: F, udp_body: Option<&mut [u8]>) -> ZCPacket
where
@@ -77,16 +77,16 @@ fn new_sack_packet(conn_id: u32, magic: u64) -> ZCPacket {
)
}
pub fn new_hole_punch_packet() -> ZCPacket {
pub fn new_hole_punch_packet(tid: u32, buf_len: u16) -> ZCPacket {
// generate a 128 bytes vec with random data
let mut rng = rand::rngs::StdRng::from_entropy();
let mut buf = vec![0u8; 128];
let mut buf = vec![0u8; buf_len as usize];
rng.fill(&mut buf[..]);
new_udp_packet(
|header| {
header.msg_type = UdpPacketType::HolePunch as u8;
header.conn_id.set(0);
header.len.set(0);
header.conn_id.set(tid);
header.len.set(buf_len);
},
Some(&mut buf),
)
@@ -151,6 +151,33 @@ async fn forward_from_ring_to_udp(
}
}
async fn udp_recv_from_socket_forward_task<F>(socket: Arc<UdpSocket>, f: F)
where
F: Fn(ZCPacket, SocketAddr) -> (),
{
let mut buf = BytesMut::new();
loop {
reserve_buf(&mut buf, UDP_DATA_MTU, UDP_DATA_MTU * 16);
let (dg_size, addr) = socket.recv_buf_from(&mut buf).await.unwrap();
tracing::trace!(
"udp recv packet: {:?}, buf: {:?}, size: {}",
addr,
buf,
dg_size
);
let zc_packet = match get_zcpacket_from_buf(buf.split()) {
Ok(v) => v,
Err(e) => {
tracing::warn!(?e, "udp get zc packet from buf error");
continue;
}
};
f(zc_packet, addr);
}
}
struct UdpConnection {
socket: Arc<UdpSocket>,
conn_id: u32,
@@ -173,7 +200,7 @@ impl UdpConnection {
let forward_task = tokio::spawn(async move {
let close_event_sender = close_event_sender;
let err = forward_from_ring_to_udp(ring_recv, &s, &dst_addr, conn_id).await;
if let Err(e) = close_event_sender.send(err) {
if let Err(e) = close_event_sender.send((dst_addr, err)) {
tracing::error!(?e, "udp send close event error");
}
});
@@ -186,6 +213,27 @@ impl UdpConnection {
forward_task,
}
}
pub fn handle_packet_from_remote(&self, zc_packet: ZCPacket) -> Result<(), TunnelError> {
let header = zc_packet.udp_tunnel_header().unwrap();
let conn_id = header.conn_id.get();
if header.msg_type != UdpPacketType::Data as u8 {
return Err(TunnelError::InvalidPacket("not data packet".to_owned()));
}
if self.conn_id != conn_id {
return Err(TunnelError::ConnIdNotMatch(self.conn_id, conn_id));
}
if !self.ring_sender.has_empty_slot() {
return Err(TunnelError::BufferFull);
}
self.ring_sender.push_no_check(zc_packet)?;
Ok(())
}
}
impl Drop for UdpConnection {
@@ -275,40 +323,16 @@ impl UdpTunnelListenerData {
}
}
async fn try_forward_packet(
self: &Self,
remote_addr: &SocketAddr,
conn_id: u32,
p: ZCPacket,
) -> Result<(), TunnelError> {
let Some(conn) = self.sock_map.get(remote_addr) else {
return Err(TunnelError::InternalError(
"udp connection not found".to_owned(),
));
};
if conn.conn_id != conn_id {
return Err(TunnelError::ConnIdNotMatch(conn.conn_id, conn_id));
}
if !conn.ring_sender.has_empty_slot() {
return Err(TunnelError::BufferFull);
}
conn.ring_sender.push_no_check(p)?;
Ok(())
}
async fn process_forward_packet(&self, zc_packet: ZCPacket, addr: &SocketAddr) {
fn do_forward_one_packet_to_conn(&self, zc_packet: ZCPacket, addr: SocketAddr) {
let header = zc_packet.udp_tunnel_header().unwrap();
if header.msg_type == UdpPacketType::Syn as u8 {
tokio::spawn(Self::handle_new_connect(self.clone(), *addr, zc_packet));
} else {
if let Err(e) = self
.try_forward_packet(addr, header.conn_id.get(), zc_packet)
.await
{
tokio::spawn(Self::handle_new_connect(self.clone(), addr, zc_packet));
} else if header.msg_type != UdpPacketType::HolePunch as u8 {
let Some(conn) = self.sock_map.get(&addr) else {
tracing::trace!(?header, "udp forward packet error, connection not found");
return;
};
if let Err(e) = conn.handle_packet_from_remote(zc_packet) {
tracing::trace!(?e, "udp forward packet error");
}
}
@@ -316,26 +340,10 @@ impl UdpTunnelListenerData {
async fn do_forward_task(self: Self) {
let socket = self.socket.as_ref().unwrap().clone();
let mut buf = BytesMut::new();
loop {
reserve_buf(&mut buf, UDP_DATA_MTU, UDP_DATA_MTU * 128);
let (dg_size, addr) = socket.recv_buf_from(&mut buf).await.unwrap();
tracing::trace!(
"udp recv packet: {:?}, buf: {:?}, size: {}",
addr,
buf,
dg_size
);
let zc_packet = match get_zcpacket_from_buf(buf.split()) {
Ok(v) => v,
Err(e) => {
tracing::warn!(?e, "udp get zc packet from buf error");
continue;
}
};
self.process_forward_packet(zc_packet, &addr).await;
}
udp_recv_from_socket_forward_task(socket, |zc_packet, addr| {
self.do_forward_one_packet_to_conn(zc_packet, addr);
})
.await;
}
}
@@ -346,7 +354,7 @@ pub struct UdpTunnelListener {
conn_recv: Receiver<Box<dyn Tunnel>>,
data: UdpTunnelListenerData,
forward_tasks: Arc<std::sync::Mutex<JoinSet<()>>>,
close_event_recv: UdpCloseEventReceiver,
close_event_recv: Option<UdpCloseEventReceiver>,
}
impl UdpTunnelListener {
@@ -359,7 +367,7 @@ impl UdpTunnelListener {
conn_recv,
data: UdpTunnelListenerData::new(addr, conn_send, close_event_send),
forward_tasks: Arc::new(std::sync::Mutex::new(JoinSet::new())),
close_event_recv,
close_event_recv: Some(close_event_recv),
}
}
@@ -398,6 +406,17 @@ impl TunnelListener for UdpTunnelListener {
.unwrap()
.spawn(self.data.clone().do_forward_task());
let sock_map = Arc::downgrade(&self.data.sock_map.clone());
let mut close_recv = self.close_event_recv.take().unwrap();
self.forward_tasks.lock().unwrap().spawn(async move {
while let Some((dst_addr, err)) = close_recv.recv().await {
if let Some(err) = err {
tracing::error!(?err, "udp close event error");
}
sock_map.upgrade().map(|v| v.remove(&dst_addr));
}
});
join_joinset_background(self.forward_tasks.clone(), "UdpTunnelListener".to_owned());
Ok(())
@@ -526,11 +545,10 @@ impl UdpTunnelConnector {
async fn build_tunnel(
&self,
socket: UdpSocket,
socket: Arc<UdpSocket>,
dst_addr: SocketAddr,
conn_id: u32,
) -> Result<Box<dyn super::Tunnel>, super::TunnelError> {
let socket = Arc::new(socket);
let ring_for_send_udp = Arc::new(RingTunnel::new(128));
let ring_for_recv_udp = Arc::new(RingTunnel::new(128));
tracing::debug!(
@@ -539,62 +557,44 @@ impl UdpTunnelConnector {
"udp build tunnel for connector"
);
let (close_event_send, mut close_event_recv) = tokio::sync::mpsc::unbounded_channel();
let (close_event_sender, mut close_event_recv) = tokio::sync::mpsc::unbounded_channel();
// forward from ring to udp
let socket_sender = socket.clone();
let ring_recv = RingStream::new(ring_for_send_udp.clone());
tokio::spawn(async move {
let err = forward_from_ring_to_udp(ring_recv, &socket_sender, &dst_addr, conn_id).await;
tracing::debug!(?err, "udp forward from ring to udp done");
close_event_send.send(err).unwrap();
});
let socket_recv = socket.clone();
let ring_sender = RingSink::new(ring_for_recv_udp.clone());
tokio::spawn(async move {
let mut buf = BytesMut::new();
loop {
reserve_buf(&mut buf, UDP_DATA_MTU, UDP_DATA_MTU * 128);
let ret;
let udp_conn = UdpConnection::new(
socket.clone(),
conn_id,
dst_addr,
ring_sender,
ring_recv,
close_event_sender,
);
let socket_clone = socket.clone();
tokio::spawn(
async move {
tokio::select! {
_ = close_event_recv.recv() => {
tracing::debug!("connector udp close event");
break;
return;
}
recv_res = socket_recv.recv_buf_from(&mut buf) => ret = Some(recv_res.unwrap()),
}
let (dg_size, addr) = ret.unwrap();
tracing::trace!(
"connector udp recv packet: {:?}, buf: {:?}, size: {}",
addr,
buf,
dg_size
);
let zc_packet = match get_zcpacket_from_buf(buf.split()) {
Ok(v) => v,
Err(e) => {
tracing::warn!(?e, "connector udp get zc packet from buf error");
continue;
}
};
let header = zc_packet.udp_tunnel_header().unwrap();
if header.conn_id.get() != conn_id {
tracing::trace!(
"connector udp conn id not match: {:?}, {:?}",
header.conn_id.get(),
conn_id
);
}
if header.msg_type == UdpPacketType::Data as u8 {
if let Err(e) = ring_sender.push_no_check(zc_packet) {
tracing::trace!(?e, "udp forward packet error");
_ = udp_recv_from_socket_forward_task(socket_clone, |zc_packet, addr| {
tracing::debug!(?addr, "connector udp forward task done");
if let Err(e) = udp_conn.handle_packet_from_remote(zc_packet) {
tracing::trace!(?e, ?addr, "udp forward packet error");
}
}) => {
tracing::debug!("connector udp forward task done");
return;
}
}
}
}.instrument(tracing::info_span!("udp connector forward from udp to ring", ?ring_for_recv_udp)));
.instrument(tracing::info_span!(
"udp forward from udp to ring",
?conn_id,
?dst_addr,
)),
);
Ok(Box::new(TunnelWrapper::new(
Box::new(RingStream::new(ring_for_recv_udp)),
@@ -610,13 +610,13 @@ impl UdpTunnelConnector {
pub async fn try_connect_with_socket(
&self,
socket: UdpSocket,
socket: Arc<UdpSocket>,
addr: SocketAddr,
) -> Result<Box<dyn super::Tunnel>, super::TunnelError> {
log::warn!("udp connect: {:?}", self.addr);
#[cfg(target_os = "windows")]
crate::arch::windows::disable_connection_reset(&socket)?;
crate::arch::windows::disable_connection_reset(socket.as_ref())?;
// send syn
let conn_id = rand::random();
@@ -649,7 +649,7 @@ impl UdpTunnelConnector {
UdpSocket::bind("[::]:0").await?
};
return self.try_connect_with_socket(socket, addr).await;
return self.try_connect_with_socket(Arc::new(socket), addr).await;
}
async fn connect_with_custom_bind(
@@ -666,7 +666,7 @@ impl UdpTunnelConnector {
)?;
setup_sokcet2(&socket2_socket, &bind_addr)?;
let socket = UdpSocket::from_std(socket2_socket.into())?;
futures.push(self.try_connect_with_socket(socket, addr));
futures.push(self.try_connect_with_socket(Arc::new(socket), addr));
}
wait_for_connect_futures(futures).await
}
@@ -714,7 +714,7 @@ mod tests {
check_scheme_and_get_socket_addr,
common::{
get_interface_name_by_ip,
tests::{_tunnel_bench, _tunnel_echo_server, _tunnel_pingpong},
tests::{_tunnel_bench, _tunnel_echo_server, _tunnel_pingpong, wait_for_condition},
},
TunnelConnector,
},
@@ -724,7 +724,7 @@ mod tests {
async fn udp_pingpong() {
let listener = UdpTunnelListener::new("udp://0.0.0.0:5556".parse().unwrap());
let connector = UdpTunnelConnector::new("udp://127.0.0.1:5556".parse().unwrap());
_tunnel_pingpong(listener, connector).await
_tunnel_pingpong(listener, connector).await;
}
#[tokio::test]
@@ -912,4 +912,29 @@ mod tests {
let port = listener.local_url().port().unwrap();
assert!(port > 0);
}
#[tokio::test]
async fn test_conn_counter() {
let mut listener = UdpTunnelListener::new("udp://0.0.0.0:5556".parse().unwrap());
let mut connector = UdpTunnelConnector::new("udp://127.0.0.1:5556".parse().unwrap());
tokio::spawn(async move {
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
let _c1 = connector.connect().await.unwrap();
let _c2 = connector.connect().await.unwrap();
});
let conn_counter = listener.get_conn_counter();
listener.listen().await.unwrap();
let c1 = listener.accept().await.unwrap();
assert_eq!(conn_counter.get(), 1);
let c2 = listener.accept().await.unwrap();
assert_eq!(conn_counter.get(), 2);
drop(c2);
wait_for_condition(|| async { conn_counter.get() == 1 }, Duration::from_secs(1)).await;
drop(c1);
wait_for_condition(|| async { conn_counter.get() == 0 }, Duration::from_secs(1)).await;
}
}

View File

@@ -0,0 +1,332 @@
use std::{net::SocketAddr, sync::Arc};
use anyhow::Context;
use bytes::BytesMut;
use futures::{stream::FuturesUnordered, SinkExt, StreamExt};
use tokio::net::{TcpListener, TcpSocket, TcpStream};
use tokio_rustls::TlsAcceptor;
use tokio_websockets::{ClientBuilder, Limits, MaybeTlsStream, Message};
use zerocopy::AsBytes;
use crate::{rpc::TunnelInfo, tunnel::insecure_tls::get_insecure_tls_client_config};
use super::{
common::{setup_sokcet2, wait_for_connect_futures, TunnelWrapper},
insecure_tls::{get_insecure_tls_cert, init_crypto_provider},
packet_def::{ZCPacket, ZCPacketType},
FromUrl, IpVersion, Tunnel, TunnelConnector, TunnelError, TunnelListener,
};
fn is_wss(addr: &url::Url) -> Result<bool, TunnelError> {
match addr.scheme() {
"ws" => Ok(false),
"wss" => Ok(true),
_ => Err(TunnelError::InvalidProtocol(addr.scheme().to_string())),
}
}
async fn sink_from_zc_packet<E>(msg: ZCPacket) -> Result<Message, E> {
Ok(Message::binary(msg.tunnel_payload_bytes().freeze()))
}
async fn map_from_ws_message(
msg: Result<Message, tokio_websockets::Error>,
) -> Option<Result<ZCPacket, TunnelError>> {
if msg.is_err() {
tracing::error!(?msg, "recv from websocket error");
return Some(Err(TunnelError::WebSocketError(msg.unwrap_err())));
}
let msg = msg.unwrap();
if msg.is_close() {
tracing::warn!("recv close message from websocket");
return None;
}
if !msg.is_binary() {
let msg = format!("{:?}", msg);
tracing::error!(?msg, "Invalid packet");
return Some(Err(TunnelError::InvalidPacket(msg)));
}
Some(Ok(ZCPacket::new_from_buf(
BytesMut::from(msg.into_payload().as_bytes()),
ZCPacketType::DummyTunnel,
)))
}
#[derive(Debug)]
pub struct WSTunnelListener {
addr: url::Url,
listener: Option<TcpListener>,
}
impl WSTunnelListener {
pub fn new(addr: url::Url) -> Self {
WSTunnelListener {
addr,
listener: None,
}
}
async fn try_accept(&mut self, stream: TcpStream) -> Result<Box<dyn Tunnel>, TunnelError> {
let info = TunnelInfo {
tunnel_type: self.addr.scheme().to_owned(),
local_addr: self.local_url().into(),
remote_addr: super::build_url_from_socket_addr(
&stream.peer_addr()?.to_string(),
self.addr.scheme().to_string().as_str(),
)
.into(),
};
let server_bulder = tokio_websockets::ServerBuilder::new().limits(Limits::unlimited());
let ret: Box<dyn Tunnel> = if is_wss(&self.addr)? {
init_crypto_provider();
let (certs, key) = get_insecure_tls_cert();
let config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(certs, key)
.with_context(|| "Failed to create server config")?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let stream = acceptor.accept(stream).await?;
let (write, read) = server_bulder.accept(stream).await?.split();
Box::new(TunnelWrapper::new(
read.filter_map(move |msg| map_from_ws_message(msg)),
write.with(move |msg| sink_from_zc_packet(msg)),
Some(info),
))
} else {
let (write, read) = server_bulder.accept(stream).await?.split();
Box::new(TunnelWrapper::new(
read.filter_map(move |msg| map_from_ws_message(msg)),
write.with(move |msg| sink_from_zc_packet(msg)),
Some(info),
))
};
Ok(ret)
}
}
#[async_trait::async_trait]
impl TunnelListener for WSTunnelListener {
async fn listen(&mut self) -> Result<(), TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), IpVersion::Both)?;
let socket2_socket = socket2::Socket::new(
socket2::Domain::for_address(addr),
socket2::Type::STREAM,
Some(socket2::Protocol::TCP),
)?;
setup_sokcet2(&socket2_socket, &addr)?;
let socket = TcpSocket::from_std_stream(socket2_socket.into());
self.addr
.set_port(Some(socket.local_addr()?.port()))
.unwrap();
self.listener = Some(socket.listen(1024)?);
Ok(())
}
async fn accept(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
loop {
let listener = self.listener.as_ref().unwrap();
// only fail on tcp accept error
let (stream, _) = listener.accept().await?;
stream.set_nodelay(true).unwrap();
match self.try_accept(stream).await {
Ok(tunnel) => return Ok(tunnel),
Err(e) => {
tracing::error!(?e, ?self, "Failed to accept ws/wss tunnel");
continue;
}
}
}
}
fn local_url(&self) -> url::Url {
self.addr.clone()
}
}
pub struct WSTunnelConnector {
addr: url::Url,
ip_version: IpVersion,
bind_addrs: Vec<SocketAddr>,
}
impl WSTunnelConnector {
pub fn new(addr: url::Url) -> Self {
WSTunnelConnector {
addr,
ip_version: IpVersion::Both,
bind_addrs: vec![],
}
}
async fn connect_with(
addr: url::Url,
ip_version: IpVersion,
tcp_socket: TcpSocket,
) -> Result<Box<dyn Tunnel>, TunnelError> {
let is_wss = is_wss(&addr)?;
let socket_addr = SocketAddr::from_url(addr.clone(), ip_version)?;
let host = socket_addr.ip();
let stream = tcp_socket.connect(socket_addr).await?;
let info = TunnelInfo {
tunnel_type: addr.scheme().to_owned(),
local_addr: super::build_url_from_socket_addr(
&stream.local_addr()?.to_string(),
addr.scheme().to_string().as_str(),
)
.into(),
remote_addr: addr.to_string(),
};
let c = ClientBuilder::from_uri(http::Uri::try_from(addr.to_string()).unwrap());
let stream: MaybeTlsStream<TcpStream> = if is_wss {
init_crypto_provider();
let tls_conn =
tokio_rustls::TlsConnector::from(Arc::new(get_insecure_tls_client_config()));
let stream = tls_conn
.connect(host.to_string().try_into().unwrap(), stream)
.await?;
MaybeTlsStream::Rustls(stream)
} else {
MaybeTlsStream::Plain(stream)
};
let (client, _) = c.connect_on(stream).await?;
let (write, read) = client.split();
let read = read.filter_map(move |msg| map_from_ws_message(msg));
let write = write.with(move |msg| sink_from_zc_packet(msg));
Ok(Box::new(TunnelWrapper::new(read, write, Some(info))))
}
async fn connect_with_default_bind(
&mut self,
addr: SocketAddr,
) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let socket = if addr.is_ipv4() {
TcpSocket::new_v4()?
} else {
TcpSocket::new_v6()?
};
Self::connect_with(self.addr.clone(), self.ip_version, socket).await
}
async fn connect_with_custom_bind(
&mut self,
addr: SocketAddr,
) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let futures = FuturesUnordered::new();
for bind_addr in self.bind_addrs.iter() {
tracing::info!(bind_addr = ?bind_addr, ?addr, "bind addr");
let socket2_socket = socket2::Socket::new(
socket2::Domain::for_address(addr),
socket2::Type::STREAM,
Some(socket2::Protocol::TCP),
)?;
setup_sokcet2(&socket2_socket, bind_addr)?;
let socket = TcpSocket::from_std_stream(socket2_socket.into());
futures.push(Self::connect_with(
self.addr.clone(),
self.ip_version,
socket,
))
}
wait_for_connect_futures(futures).await
}
}
#[async_trait::async_trait]
impl TunnelConnector for WSTunnelConnector {
async fn connect(&mut self) -> Result<Box<dyn Tunnel>, super::TunnelError> {
let addr = SocketAddr::from_url(self.addr.clone(), self.ip_version)?;
if self.bind_addrs.is_empty() || addr.is_ipv6() {
self.connect_with_default_bind(addr).await
} else {
self.connect_with_custom_bind(addr).await
}
}
fn remote_url(&self) -> url::Url {
self.addr.clone()
}
fn set_ip_version(&mut self, ip_version: IpVersion) {
self.ip_version = ip_version;
}
fn set_bind_addrs(&mut self, addrs: Vec<SocketAddr>) {
self.bind_addrs = addrs;
}
}
#[cfg(test)]
pub mod tests {
use crate::tunnel::common::tests::_tunnel_pingpong;
use crate::tunnel::websocket::{WSTunnelConnector, WSTunnelListener};
use crate::tunnel::{TunnelConnector, TunnelListener};
#[rstest::rstest]
#[tokio::test]
#[serial_test::serial]
async fn ws_pingpong(#[values("ws", "wss")] proto: &str) {
let listener = WSTunnelListener::new(format!("{}://0.0.0.0:25556", proto).parse().unwrap());
let connector =
WSTunnelConnector::new(format!("{}://127.0.0.1:25556", proto).parse().unwrap());
_tunnel_pingpong(listener, connector).await
}
#[rstest::rstest]
#[tokio::test]
#[serial_test::serial]
async fn ws_pingpong_bind(#[values("ws", "wss")] proto: &str) {
let listener = WSTunnelListener::new(format!("{}://0.0.0.0:25557", proto).parse().unwrap());
let mut connector =
WSTunnelConnector::new(format!("{}://127.0.0.1:25557", proto).parse().unwrap());
connector.set_bind_addrs(vec!["127.0.0.1:0".parse().unwrap()]);
_tunnel_pingpong(listener, connector).await
}
// TODO: tokio-websockets cannot correctly handle close, benchmark case is disabled
// #[rstest::rstest]
// #[tokio::test]
// #[serial_test::serial]
// async fn ws_bench(#[values("ws", "wss")] proto: &str) {
// enable_log();
// let listener = WSTunnelListener::new(format!("{}://0.0.0.0:25557", proto).parse().unwrap());
// let connector =
// WSTunnelConnector::new(format!("{}://127.0.0.1:25557", proto).parse().unwrap());
// _tunnel_bench(listener, connector).await
// }
#[tokio::test]
async fn ws_accept_wss() {
let mut listener = WSTunnelListener::new("wss://0.0.0.0:25558".parse().unwrap());
listener.listen().await.unwrap();
let j = tokio::spawn(async move {
let _ = listener.accept().await;
});
let mut connector = WSTunnelConnector::new("ws://127.0.0.1:25558".parse().unwrap());
connector.connect().await.unwrap_err();
let mut connector = WSTunnelConnector::new("wss://127.0.0.1:25558".parse().unwrap());
connector.connect().await.unwrap();
j.abort();
}
}

Some files were not shown because too many files have changed in this diff Show More