* support proxying TCP streams over KCP to improve the experience of TCP over UDP
* update rust version
* lower the priority of subnet proxy routes on Windows (raise the route metric)
This commit is contained in:
Sijie.Sun
2025-01-26 00:41:15 +08:00
committed by GitHub
parent 1194ee1c2d
commit 55a39491cb
19 changed files with 723 additions and 164 deletions
+5 -2
View File
@@ -8,7 +8,7 @@ edition = "2021"
authors = ["kkrainbow"]
keywords = ["vpn", "p2p", "network", "easytier"]
categories = ["network-programming", "command-line-utilities"]
rust-version = "1.77.0"
rust-version = "1.84.0"
license-file = "LICENSE"
readme = "README.md"
@@ -163,12 +163,13 @@ indexmap = { version = "~1.9.3", optional = false, features = ["std"] }
atomic-shim = "0.2.0"
smoltcp = { version = "0.11.0", optional = true, default-features = false, features = [
smoltcp = { version = "0.12.0", optional = true, default-features = false, features = [
"std",
"medium-ip",
"proto-ipv4",
"proto-ipv6",
"socket-tcp",
"socket-tcp-cubic",
"async",
] }
parking_lot = { version = "0.12.0", optional = true }
@@ -185,6 +186,8 @@ service-manager = {git = "https://github.com/chipsenkbeil/service-manager-rs.git
async-compression = { version = "0.4.17", default-features = false, features = ["zstd", "tokio"] }
kcp-sys = { git = "https://github.com/EasyTier/kcp-sys" }
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "freebsd"))'.dependencies]
machine-uid = "0.5.3"
+3
View File
@@ -30,6 +30,9 @@ pub fn gen_default_flags() -> Flags {
multi_thread: true,
data_compress_algo: CompressionAlgoPb::None.into(),
bind_device: true,
enable_kcp_proxy: false,
disable_kcp_input: true,
disable_relay_kcp: true,
}
}
+1 -1
View File
@@ -295,7 +295,7 @@ impl IfConfiguerTrait for WindowsIfConfiger {
};
run_shell_cmd(
format!(
"route ADD {} MASK {} 10.1.1.1 IF {} METRIC 255",
"route ADD {} MASK {} 10.1.1.1 IF {} METRIC 9000",
address,
cidr_to_subnet_mask(cidr_prefix),
idx
+16
View File
@@ -313,6 +313,20 @@ struct Cli {
help = t!("core_clap.bind_device").to_string()
)]
bind_device: Option<bool>,
#[arg(
long,
help = t!("core_clap.enable_kcp_proxy").to_string(),
default_value = "false"
)]
enable_kcp_proxy: bool,
#[arg(
long,
help = t!("core_clap.enable_kcp_proxy").to_string(),
default_value = "false"
)]
disable_kcp_input: bool,
}
rust_i18n::i18n!("locales", fallback = "en");
@@ -567,6 +581,8 @@ impl TryFrom<&Cli> for TomlConfigLoader {
if let Some(bind_device) = cli.bind_device {
f.bind_device = bind_device;
}
f.enable_kcp_proxy = cli.enable_kcp_proxy;
f.disable_kcp_input = cli.disable_kcp_input;
cfg.set_flags(f);
cfg.set_exit_nodes(cli.exit_nodes.clone());
+315
View File
@@ -0,0 +1,315 @@
use std::{
net::{IpAddr, SocketAddr},
sync::Arc,
time::Duration,
};
use anyhow::Context;
use bytes::Bytes;
use kcp_sys::{
endpoint::{KcpEndpoint, KcpPacketReceiver},
packet_def::KcpPacket,
stream::KcpStream,
};
use pnet::packet::{ip::IpNextHeaderProtocols, ipv4::Ipv4Packet};
use prost::Message;
use tokio::{io::copy_bidirectional, task::JoinSet};
use super::{
tcp_proxy::{NatDstConnector, NatDstTcpConnector, TcpProxy},
CidrSet,
};
use crate::{
common::{
error::Result,
global_ctx::{ArcGlobalCtx, GlobalCtx},
},
peers::{peer_manager::PeerManager, NicPacketFilter, PeerPacketFilter},
proto::peer_rpc::KcpConnData,
tunnel::packet_def::{PacketType, PeerManagerHeader, ZCPacket},
};
/// Peer-packet filter that captures KCP packets addressed to this side of
/// the proxy and injects them into the local KCP endpoint; every other
/// packet is passed through untouched.
struct KcpEndpointFilter {
    kcp_endpoint: Arc<KcpEndpoint>,
    /// True when this filter runs on the proxy source side; decides which
    /// packet type (KcpSrc vs KcpDst) is consumed.
    is_src: bool,
}
#[async_trait::async_trait]
impl PeerPacketFilter for KcpEndpointFilter {
    /// Feeds KCP packets meant for this endpoint into the KCP state machine,
    /// returning `None` to consume them; all other packets are handed back.
    async fn try_process_packet_from_peer(&self, packet: ZCPacket) -> Option<ZCPacket> {
        let packet_type = packet.peer_manager_header().unwrap().packet_type;

        // The src side consumes KcpDst packets, the dst side consumes KcpSrc
        // packets; everything else continues down the pipeline.
        let for_us = (packet_type == PacketType::KcpSrc as u8 && !self.is_src)
            || (packet_type == PacketType::KcpDst as u8 && self.is_src);
        if !for_us {
            return Some(packet);
        }

        // Send errors are deliberately ignored: a closed endpoint just means
        // the packet is dropped, which KCP tolerates.
        let _ = self
            .kcp_endpoint
            .input_sender_ref()
            .send(KcpPacket::from(packet.payload_bytes()))
            .await;
        None
    }
}
/// Forwards packets emitted by the KCP endpoint to the appropriate peer.
///
/// The KCP session header carries both session ids; which one names the
/// destination peer depends on which side of the proxy we are
/// (`dst_session_id` on the source side, `src_session_id` on the
/// destination side).
#[tracing::instrument]
async fn handle_kcp_output(
    peer_mgr: Arc<PeerManager>,
    mut output_receiver: KcpPacketReceiver,
    is_src: bool,
) {
    while let Some(packet) = output_receiver.recv().await {
        let dst_peer_id = if is_src {
            packet.header().dst_session_id()
        } else {
            packet.header().src_session_id()
        };
        let packet_type = if is_src {
            PacketType::KcpSrc as u8
        } else {
            PacketType::KcpDst as u8
        };

        let mut packet = ZCPacket::new_with_payload(&packet.inner().freeze());
        // `packet_type` is already a u8; the previous `as u8` cast was redundant.
        packet.fill_peer_manager_hdr(peer_mgr.my_peer_id(), dst_peer_id, packet_type);

        // A lost packet here is recoverable: KCP retransmits on its own.
        if let Err(e) = peer_mgr.send_msg(packet, dst_peer_id).await {
            tracing::error!("failed to send kcp packet to peer: {:?}", e);
        }
    }
}
/// Connector that reaches NAT destinations over a KCP session instead of a
/// plain kernel TCP socket.
#[derive(Debug, Clone)]
pub struct NatDstKcpConnector {
    /// Shared KCP endpoint all proxied connections go through.
    kcp_endpoint: Arc<KcpEndpoint>,
    /// Used to resolve which peer the destination address belongs to.
    peer_mgr: Arc<PeerManager>,
}
#[async_trait::async_trait]
impl NatDstConnector for NatDstKcpConnector {
    type DstStream = KcpStream;

    /// Establishes a KCP stream toward `nat_dst` via the single peer that
    /// owns (or routes to) the destination address.
    ///
    /// Returns an error when the destination is IPv6 (unsupported), when no
    /// unique destination peer can be resolved, or when the KCP handshake
    /// fails within the 10-second timeout.
    async fn connect(&self, nat_dst: SocketAddr) -> Result<Self::DstStream> {
        // The real destination address travels to the remote side inside the
        // KCP connection payload.
        let conn_data = KcpConnData {
            dst: Some(nat_dst.into()),
        };

        // Only IPv4 destinations are supported for now.
        let (dst_peers, _) = match nat_dst {
            SocketAddr::V4(addr) => {
                let ip = addr.ip();
                self.peer_mgr.get_msg_dst_peer(&ip).await
            }
            SocketAddr::V6(_) => return Err(anyhow::anyhow!("ipv6 is not supported").into()),
        };

        tracing::trace!("kcp nat dst: {:?}, dst peers: {:?}", nat_dst, dst_peers);

        // KCP proxying needs exactly one unambiguous destination peer.
        if dst_peers.len() != 1 {
            return Err(anyhow::anyhow!("no dst peer found for nat dst: {}", nat_dst).into());
        }

        let ret = self
            .kcp_endpoint
            .connect(
                Duration::from_secs(10),
                self.peer_mgr.my_peer_id(),
                dst_peers[0],
                Bytes::from(conn_data.encode_to_vec()),
            )
            .await
            // `SocketAddr` implements Display; no intermediate to_string() needed.
            .with_context(|| format!("failed to connect to nat dst: {}", nat_dst))?;

        // ok_or_else: only build the error value on the failure path.
        let stream = KcpStream::new(&self.kcp_endpoint, ret)
            .ok_or_else(|| anyhow::anyhow!("failed to create kcp stream"))?;
        Ok(stream)
    }

    fn check_packet_from_peer_fast(&self, _cidr_set: &CidrSet, _global_ctx: &GlobalCtx) -> bool {
        // if kcp is turned off, the filter will not be added to the pipeline
        true
    }

    fn check_packet_from_peer(
        &self,
        _cidr_set: &CidrSet,
        _global_ctx: &GlobalCtx,
        _hdr: &PeerManagerHeader,
        _ipv4: &Ipv4Packet,
    ) -> bool {
        true
    }
}
/// Newtype wrapper over the KCP-backed TcpProxy so a distinct
/// NicPacketFilter implementation can be attached for the source side.
#[derive(Clone)]
struct TcpProxyForKcpSrc(Arc<TcpProxy<NatDstKcpConnector>>);
/// Source side of the KCP proxy: intercepts locally originated TCP streams
/// and carries them to the destination peer over KCP.
pub struct KcpProxySrc {
    kcp_endpoint: Arc<KcpEndpoint>,
    peer_manager: Arc<PeerManager>,
    tcp_proxy: TcpProxyForKcpSrc,
    // Background tasks owned by this proxy (KCP output forwarding).
    tasks: JoinSet<()>,
}
#[async_trait::async_trait]
impl NicPacketFilter for TcpProxyForKcpSrc {
    /// Claims locally originated TCP packets for the KCP proxy by retargeting
    /// them to our own peer id. Returns true when the packet was handled.
    async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
        // Give the inner TcpProxy first chance at the packet.
        let ret = self.0.try_process_packet_from_nic(zc_packet).await;
        if ret {
            return true;
        }

        let Some(my_ipv4) = self.0.get_local_ip() else {
            return false;
        };

        let data = zc_packet.payload();
        // A payload shorter than a minimal IPv4 header is not ours to handle;
        // the previous unwrap() would have panicked here.
        let Some(ip_packet) = Ipv4Packet::new(data) else {
            return false;
        };
        if ip_packet.get_version() != 4
            // TODO: how to support net to net kcp proxy?
            || ip_packet.get_source() != my_ipv4
            || ip_packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp
        {
            return false;
        }

        // Redirect the packet to ourselves so the KCP proxy picks it up.
        zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.0.get_my_peer_id().into();

        true
    }
}
impl KcpProxySrc {
    /// Builds the source-side proxy: starts a KCP endpoint, spawns the task
    /// that forwards its output packets to peers, and wraps a TcpProxy that
    /// dials NAT destinations through the KCP connector.
    pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
        let mut kcp_endpoint = KcpEndpoint::new();
        kcp_endpoint.run().await;

        let output_receiver = kcp_endpoint.output_receiver().unwrap();

        let mut tasks = JoinSet::new();
        tasks.spawn(handle_kcp_output(
            peer_manager.clone(),
            output_receiver,
            true,
        ));

        let kcp_endpoint = Arc::new(kcp_endpoint);

        let tcp_proxy = TcpProxy::new(
            peer_manager.clone(),
            NatDstKcpConnector {
                kcp_endpoint: kcp_endpoint.clone(),
                peer_mgr: peer_manager.clone(),
            },
        );

        Self {
            kcp_endpoint,
            peer_manager,
            tcp_proxy: TcpProxyForKcpSrc(tcp_proxy),
            tasks,
        }
    }

    /// Registers the packet pipelines and starts the inner TcpProxy with
    /// `start(false)`, because the pipelines are added here rather than by
    /// the TcpProxy itself.
    pub async fn start(&self) {
        // NIC-side filter: redirects local TCP traffic into the KCP proxy.
        self.peer_manager
            .add_nic_packet_process_pipeline(Box::new(self.tcp_proxy.clone()))
            .await;
        // Peer-side filter of the inner TcpProxy.
        self.peer_manager
            .add_packet_process_pipeline(Box::new(self.tcp_proxy.0.clone()))
            .await;
        // Feed KcpDst replies arriving from peers into our KCP endpoint.
        self.peer_manager
            .add_packet_process_pipeline(Box::new(KcpEndpointFilter {
                kcp_endpoint: self.kcp_endpoint.clone(),
                is_src: true,
            }))
            .await;
        self.tcp_proxy.0.start(false).await.unwrap();
    }
}
/// Destination side of the KCP proxy: accepts incoming KCP streams from
/// peers and bridges them to real TCP destinations.
pub struct KcpProxyDst {
    kcp_endpoint: Arc<KcpEndpoint>,
    peer_manager: Arc<PeerManager>,
    // Background tasks spawned by this proxy (output forwarding, accept loop).
    tasks: JoinSet<()>,
}
impl KcpProxyDst {
pub async fn new(peer_manager: Arc<PeerManager>) -> Self {
let mut kcp_endpoint = KcpEndpoint::new();
kcp_endpoint.run().await;
let mut tasks = JoinSet::new();
let output_receiver = kcp_endpoint.output_receiver().unwrap();
tasks.spawn(handle_kcp_output(
peer_manager.clone(),
output_receiver,
false,
));
Self {
kcp_endpoint: Arc::new(kcp_endpoint),
peer_manager,
tasks,
}
}
#[tracing::instrument(ret)]
async fn handle_one_in_stream(
mut kcp_stream: KcpStream,
global_ctx: ArcGlobalCtx,
) -> Result<()> {
let mut conn_data = kcp_stream.conn_data().clone();
let parsed_conn_data = KcpConnData::decode(&mut conn_data)
.with_context(|| format!("failed to decode kcp conn data: {:?}", conn_data))?;
let mut dst_socket: SocketAddr = parsed_conn_data
.dst
.ok_or(anyhow::anyhow!(
"failed to get dst socket from kcp conn data: {:?}",
parsed_conn_data
))?
.into();
if Some(dst_socket.ip()) == global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address())) {
dst_socket = format!("127.0.0.1:{}", dst_socket.port()).parse().unwrap();
}
tracing::debug!("kcp connect to dst socket: {:?}", dst_socket);
let _g = global_ctx.net_ns.guard();
let connector = NatDstTcpConnector {};
let mut ret = connector.connect(dst_socket).await?;
copy_bidirectional(&mut ret, &mut kcp_stream).await?;
Ok(())
}
async fn run_accept_task(&mut self) {
let kcp_endpoint = self.kcp_endpoint.clone();
let global_ctx = self.peer_manager.get_global_ctx().clone();
self.tasks.spawn(async move {
while let Ok(conn) = kcp_endpoint.accept().await {
let stream = KcpStream::new(&kcp_endpoint, conn)
.ok_or(anyhow::anyhow!("failed to create kcp stream"))
.unwrap();
let global_ctx = global_ctx.clone();
tokio::spawn(async move {
let _ = Self::handle_one_in_stream(stream, global_ctx).await;
});
}
});
}
pub async fn start(&mut self) {
self.run_accept_task().await;
self.peer_manager
.add_packet_process_pipeline(Box::new(KcpEndpointFilter {
kcp_endpoint: self.kcp_endpoint.clone(),
is_src: false,
}))
.await;
}
}
+3 -1
View File
@@ -15,8 +15,10 @@ pub mod fast_socks5;
#[cfg(feature = "socks5")]
pub mod socks5;
pub mod kcp_proxy;
#[derive(Debug)]
struct CidrSet {
pub(crate) struct CidrSet {
global_ctx: ArcGlobalCtx,
cidr_set: Arc<Mutex<Vec<cidr::IpCidr>>>,
tasks: JoinSet<()>,
+161 -50
View File
@@ -1,3 +1,4 @@
use anyhow::Context;
use cidr::Ipv4Inet;
use core::panic;
use crossbeam::atomic::AtomicCell;
@@ -11,7 +12,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4};
use std::sync::atomic::{AtomicBool, AtomicU16};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::io::copy_bidirectional;
use tokio::io::{copy_bidirectional, AsyncRead, AsyncWrite};
use tokio::net::{TcpListener, TcpSocket, TcpStream};
use tokio::sync::{mpsc, Mutex};
use tokio::task::JoinSet;
@@ -23,13 +24,74 @@ use crate::common::join_joinset_background;
use crate::peers::peer_manager::PeerManager;
use crate::peers::{NicPacketFilter, PeerPacketFilter};
use crate::tunnel::packet_def::{PacketType, ZCPacket};
use crate::tunnel::packet_def::{PacketType, PeerManagerHeader, ZCPacket};
use super::CidrSet;
#[cfg(feature = "smoltcp")]
use super::tokio_smoltcp::{self, channel_device, Net, NetConfig};
#[async_trait::async_trait]
pub(crate) trait NatDstConnector: Send + Sync + Clone + 'static {
type DstStream: AsyncRead + AsyncWrite + Unpin + Send;
async fn connect(&self, dst: SocketAddr) -> Result<Self::DstStream>;
fn check_packet_from_peer_fast(&self, cidr_set: &CidrSet, global_ctx: &GlobalCtx) -> bool;
fn check_packet_from_peer(
&self,
cidr_set: &CidrSet,
global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader,
ipv4: &Ipv4Packet,
) -> bool;
}
#[derive(Debug, Clone)]
pub struct NatDstTcpConnector;
#[async_trait::async_trait]
impl NatDstConnector for NatDstTcpConnector {
type DstStream = TcpStream;
async fn connect(&self, nat_dst: SocketAddr) -> Result<Self::DstStream> {
let socket = TcpSocket::new_v4().unwrap();
if let Err(e) = socket.set_nodelay(true) {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
Ok(
tokio::time::timeout(Duration::from_secs(10), socket.connect(nat_dst))
.await?
.with_context(|| format!("connect to nat dst failed: {:?}", nat_dst))?,
)
}
fn check_packet_from_peer_fast(&self, cidr_set: &CidrSet, global_ctx: &GlobalCtx) -> bool {
!cidr_set.is_empty() || global_ctx.enable_exit_node() || global_ctx.no_tun()
}
fn check_packet_from_peer(
&self,
cidr_set: &CidrSet,
global_ctx: &GlobalCtx,
hdr: &PeerManagerHeader,
ipv4: &Ipv4Packet,
) -> bool {
let is_exit_node = hdr.is_exit_node();
if !cidr_set.contains_v4(ipv4.get_destination())
&& !is_exit_node
&& !(global_ctx.no_tun()
&& Some(ipv4.get_destination())
== global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
{
return false;
}
true
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum NatDstEntryState {
// receive syn packet but not start connecting to dst
@@ -83,7 +145,10 @@ impl ProxyTcpStream {
}
}
pub async fn copy_bidirectional(&mut self, dst: &mut TcpStream) -> Result<()> {
pub async fn copy_bidirectional<D: AsyncRead + AsyncWrite + Unpin>(
&mut self,
dst: &mut D,
) -> Result<()> {
match self {
Self::KernelTcpStream(stream) => {
copy_bidirectional(stream, dst).await?;
@@ -176,7 +241,7 @@ type ConnSockMap = Arc<DashMap<uuid::Uuid, ArcNatDstEntry>>;
type AddrConnSockMap = Arc<DashMap<SocketAddr, ArcNatDstEntry>>;
#[derive(Debug)]
pub struct TcpProxy {
pub struct TcpProxy<C: NatDstConnector> {
global_ctx: Arc<GlobalCtx>,
peer_manager: Arc<PeerManager>,
local_port: AtomicU16,
@@ -194,10 +259,12 @@ pub struct TcpProxy {
#[cfg(feature = "smoltcp")]
smoltcp_net: Arc<Mutex<Option<Net>>>,
enable_smoltcp: Arc<AtomicBool>,
connector: C,
}
#[async_trait::async_trait]
impl PeerPacketFilter for TcpProxy {
impl<C: NatDstConnector> PeerPacketFilter for TcpProxy<C> {
async fn try_process_packet_from_peer(&self, mut packet: ZCPacket) -> Option<ZCPacket> {
if let Some(_) = self.try_handle_peer_packet(&mut packet).await {
if self
@@ -221,10 +288,10 @@ impl PeerPacketFilter for TcpProxy {
}
#[async_trait::async_trait]
impl NicPacketFilter for TcpProxy {
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) {
impl<C: NatDstConnector> NicPacketFilter for TcpProxy<C> {
async fn try_process_packet_from_nic(&self, zc_packet: &mut ZCPacket) -> bool {
let Some(my_ipv4) = self.get_local_ip() else {
return;
return false;
};
let data = zc_packet.payload();
@@ -233,25 +300,33 @@ impl NicPacketFilter for TcpProxy {
|| ip_packet.get_source() != my_ipv4
|| ip_packet.get_next_level_protocol() != IpNextHeaderProtocols::Tcp
{
return;
return false;
}
let tcp_packet = TcpPacket::new(ip_packet.payload()).unwrap();
if tcp_packet.get_source() != self.get_local_port() {
return;
return false;
}
let dst_addr = SocketAddr::V4(SocketAddrV4::new(
let mut dst_addr = SocketAddr::V4(SocketAddrV4::new(
ip_packet.get_destination(),
tcp_packet.get_destination(),
));
let mut need_transform_dst = false;
// for kcp proxy, the src ip of nat entry will be converted from my ip to fake ip
// here we need to convert it back
if !self.is_smoltcp_enabled() && dst_addr.ip() == Self::get_fake_local_ipv4(my_ipv4) {
dst_addr.set_ip(IpAddr::V4(my_ipv4));
need_transform_dst = true;
}
tracing::trace!(dst_addr = ?dst_addr, "tcp packet try find entry");
let entry = if let Some(entry) = self.addr_conn_map.get(&dst_addr) {
entry
} else {
let Some(syn_entry) = self.syn_map.get(&dst_addr) else {
return;
return false;
};
syn_entry
};
@@ -267,9 +342,15 @@ impl NicPacketFilter for TcpProxy {
.mut_peer_manager_header()
.unwrap()
.set_no_proxy(true);
if need_transform_dst {
zc_packet.mut_peer_manager_header().unwrap().to_peer_id = self.get_my_peer_id().into();
}
let mut ip_packet = MutableIpv4Packet::new(zc_packet.mut_payload()).unwrap();
ip_packet.set_source(ip);
if need_transform_dst {
ip_packet.set_destination(my_ipv4);
}
let dst = ip_packet.get_destination();
let mut tcp_packet = MutableTcpPacket::new(ip_packet.payload_mut()).unwrap();
@@ -280,12 +361,15 @@ impl NicPacketFilter for TcpProxy {
Self::update_ip_packet_checksum(&mut ip_packet);
tracing::trace!(dst_addr = ?dst_addr, nat_entry = ?nat_entry, packet = ?ip_packet, "tcp packet after modified");
true
}
}
impl TcpProxy {
pub fn new(global_ctx: Arc<GlobalCtx>, peer_manager: Arc<PeerManager>) -> Arc<Self> {
impl<C: NatDstConnector> TcpProxy<C> {
pub fn new(peer_manager: Arc<PeerManager>, connector: C) -> Arc<Self> {
let (smoltcp_stack_sender, smoltcp_stack_receiver) = mpsc::channel::<ZCPacket>(1000);
let global_ctx = peer_manager.get_global_ctx();
Arc::new(Self {
global_ctx: global_ctx.clone(),
@@ -307,6 +391,8 @@ impl TcpProxy {
smoltcp_net: Arc::new(Mutex::new(None)),
enable_smoltcp: Arc::new(AtomicBool::new(true)),
connector,
})
}
@@ -326,15 +412,17 @@ impl TcpProxy {
ip_packet.set_checksum(pnet::packet::ipv4::checksum(&ip_packet.to_immutable()));
}
pub async fn start(self: &Arc<Self>) -> Result<()> {
pub async fn start(self: &Arc<Self>, add_pipeline: bool) -> Result<()> {
self.run_syn_map_cleaner().await?;
self.run_listener().await?;
self.peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.clone()))
.await;
if add_pipeline {
self.peer_manager
.add_packet_process_pipeline(Box::new(self.clone()))
.await;
self.peer_manager
.add_nic_packet_process_pipeline(Box::new(self.clone()))
.await;
}
join_joinset_background(self.tasks.clone(), "TcpProxy".to_owned());
Ok(())
@@ -458,11 +546,26 @@ impl TcpProxy {
let syn_map = self.syn_map.clone();
let conn_map = self.conn_map.clone();
let addr_conn_map = self.addr_conn_map.clone();
let connector = self.connector.clone();
let accept_task = async move {
let conn_map = conn_map.clone();
while let Ok((tcp_stream, socket_addr)) = tcp_listener.accept().await {
while let Ok((tcp_stream, mut socket_addr)) = tcp_listener.accept().await {
let my_ip = global_ctx
.get_ipv4()
.as_ref()
.map(Ipv4Inet::address)
.unwrap_or(Ipv4Addr::UNSPECIFIED);
if socket_addr.ip() == Self::get_fake_local_ipv4(my_ip) {
socket_addr.set_ip(IpAddr::V4(my_ip));
}
let Some(entry) = syn_map.get(&socket_addr) else {
tracing::error!("tcp connection from unknown source: {:?}", socket_addr);
tracing::error!(
?my_ip,
?socket_addr,
"tcp connection from unknown source, ignore it"
);
continue;
};
tracing::info!(
@@ -483,6 +586,7 @@ impl TcpProxy {
assert!(old_nat_val.is_none());
tasks.lock().unwrap().spawn(Self::connect_to_nat_dst(
connector.clone(),
global_ctx.clone(),
tcp_stream,
conn_map.clone(),
@@ -511,6 +615,7 @@ impl TcpProxy {
}
async fn connect_to_nat_dst(
connector: C,
global_ctx: ArcGlobalCtx,
src_tcp_stream: ProxyTcpStream,
conn_map: ConnSockMap,
@@ -521,12 +626,6 @@ impl TcpProxy {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
let _guard = global_ctx.net_ns.guard();
let socket = TcpSocket::new_v4().unwrap();
if let Err(e) = socket.set_nodelay(true) {
tracing::warn!("set_nodelay failed, ignore it: {:?}", e);
}
let nat_dst = if Some(nat_entry.dst.ip())
== global_ctx.get_ipv4().map(|ip| IpAddr::V4(ip.address()))
{
@@ -537,12 +636,8 @@ impl TcpProxy {
nat_entry.dst
};
let Ok(Ok(dst_tcp_stream)) = tokio::time::timeout(
Duration::from_secs(10),
TcpSocket::new_v4().unwrap().connect(nat_dst),
)
.await
else {
let _guard = global_ctx.net_ns.guard();
let Ok(dst_tcp_stream) = connector.connect(nat_dst).await else {
tracing::error!("connect to dst failed: {:?}", nat_entry);
nat_entry.state.store(NatDstEntryState::Closed);
Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry);
@@ -567,7 +662,7 @@ impl TcpProxy {
async fn handle_nat_connection(
mut src_tcp_stream: ProxyTcpStream,
mut dst_tcp_stream: TcpStream,
mut dst_tcp_stream: C::DstStream,
conn_map: ConnSockMap,
addr_conn_map: AddrConnSockMap,
nat_entry: ArcNatDstEntry,
@@ -577,6 +672,10 @@ impl TcpProxy {
let ret = src_tcp_stream.copy_bidirectional(&mut dst_tcp_stream).await;
tracing::info!(nat_entry = ?nat_entry_clone, ret = ?ret, "nat tcp connection closed");
nat_entry_clone.state.store(NatDstEntryState::Closed);
drop(src_tcp_stream);
// sleep later so the fin packet can be processed
tokio::time::sleep(Duration::from_secs(10)).await;
Self::remove_entry_from_all_conn_map(conn_map, addr_conn_map, nat_entry_clone);
});
@@ -586,11 +685,12 @@ impl TcpProxy {
self.local_port.load(std::sync::atomic::Ordering::Relaxed)
}
pub fn get_my_peer_id(&self) -> u32 {
self.peer_manager.my_peer_id()
}
pub fn get_local_ip(&self) -> Option<Ipv4Addr> {
if self
.enable_smoltcp
.load(std::sync::atomic::Ordering::Relaxed)
{
if self.is_smoltcp_enabled() {
Some(Ipv4Addr::new(192, 88, 99, 254))
} else {
self.global_ctx
@@ -600,17 +700,26 @@ impl TcpProxy {
}
}
pub fn is_smoltcp_enabled(&self) -> bool {
self.enable_smoltcp
.load(std::sync::atomic::Ordering::Relaxed)
}
pub fn get_fake_local_ipv4(local_ip: Ipv4Addr) -> Ipv4Addr {
let octets = local_ip.octets();
Ipv4Addr::new(octets[0], octets[1], octets[2], 0)
}
async fn try_handle_peer_packet(&self, packet: &mut ZCPacket) -> Option<()> {
if self.cidr_set.is_empty()
&& !self.global_ctx.enable_exit_node()
&& !self.global_ctx.no_tun()
if !self
.connector
.check_packet_from_peer_fast(&self.cidr_set, &self.global_ctx)
{
return None;
}
let ipv4_addr = self.get_local_ip()?;
let hdr = packet.peer_manager_header().unwrap();
let is_exit_node = hdr.is_exit_node();
let hdr = packet.peer_manager_header().unwrap().clone();
if hdr.packet_type != PacketType::Data as u8 || hdr.is_no_proxy() {
return None;
@@ -623,11 +732,9 @@ impl TcpProxy {
return None;
}
if !self.cidr_set.contains_v4(ipv4.get_destination())
&& !is_exit_node
&& !(self.global_ctx.no_tun()
&& Some(ipv4.get_destination())
== self.global_ctx.get_ipv4().as_ref().map(Ipv4Inet::address))
if !self
.connector
.check_packet_from_peer(&self.cidr_set, &self.global_ctx, &hdr, &ipv4)
{
return None;
}
@@ -658,6 +765,10 @@ impl TcpProxy {
}
let mut ip_packet = MutableIpv4Packet::new(payload_bytes).unwrap();
if !self.is_smoltcp_enabled() && source_ip == ipv4_addr {
// modify the source so the response packet can be handled by tun device
ip_packet.set_source(Self::get_fake_local_ipv4(ipv4_addr));
}
ip_packet.set_destination(ipv4_addr);
let source = ip_packet.get_source();
+1 -1
View File
@@ -43,7 +43,7 @@ pub struct BufferRxToken(Packet);
impl RxToken for BufferRxToken {
fn consume<R, F>(mut self, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
F: FnOnce(&[u8]) -> R,
{
let p = &mut self.0;
let result = f(p);
+22 -4
View File
@@ -17,7 +17,8 @@ use crate::connector::direct::DirectConnectorManager;
use crate::connector::manual::{ConnectorManagerRpcService, ManualConnectorManager};
use crate::connector::udp_hole_punch::UdpHolePunchConnector;
use crate::gateway::icmp_proxy::IcmpProxy;
use crate::gateway::tcp_proxy::TcpProxy;
use crate::gateway::kcp_proxy::{KcpProxyDst, KcpProxySrc};
use crate::gateway::tcp_proxy::{NatDstTcpConnector, TcpProxy};
use crate::gateway::udp_proxy::UdpProxy;
use crate::peer_center::instance::PeerCenterInstance;
use crate::peers::peer_conn::PeerConnId;
@@ -40,7 +41,7 @@ use crate::gateway::socks5::Socks5Server;
#[derive(Clone)]
struct IpProxy {
tcp_proxy: Arc<TcpProxy>,
tcp_proxy: Arc<TcpProxy<NatDstTcpConnector>>,
icmp_proxy: Arc<IcmpProxy>,
udp_proxy: Arc<UdpProxy>,
global_ctx: ArcGlobalCtx,
@@ -49,7 +50,7 @@ struct IpProxy {
impl IpProxy {
fn new(global_ctx: ArcGlobalCtx, peer_manager: Arc<PeerManager>) -> Result<Self, Error> {
let tcp_proxy = TcpProxy::new(global_ctx.clone(), peer_manager.clone());
let tcp_proxy = TcpProxy::new(peer_manager.clone(), NatDstTcpConnector {});
let icmp_proxy = IcmpProxy::new(global_ctx.clone(), peer_manager.clone())
.with_context(|| "create icmp proxy failed")?;
let udp_proxy = UdpProxy::new(global_ctx.clone(), peer_manager.clone())
@@ -72,7 +73,7 @@ impl IpProxy {
}
self.started.store(true, Ordering::Relaxed);
self.tcp_proxy.start().await?;
self.tcp_proxy.start(true).await?;
self.icmp_proxy.start().await?;
self.udp_proxy.start().await?;
Ok(())
@@ -116,6 +117,9 @@ pub struct Instance {
ip_proxy: Option<IpProxy>,
kcp_proxy_src: Option<KcpProxySrc>,
kcp_proxy_dst: Option<KcpProxyDst>,
peer_center: Arc<PeerCenterInstance>,
vpn_portal: Arc<Mutex<Box<dyn VpnPortal>>>,
@@ -193,6 +197,8 @@ impl Instance {
udp_hole_puncher: Arc::new(Mutex::new(udp_hole_puncher)),
ip_proxy: None,
kcp_proxy_src: None,
kcp_proxy_dst: None,
peer_center,
@@ -383,6 +389,18 @@ impl Instance {
)?);
self.run_ip_proxy().await?;
if self.global_ctx.get_flags().enable_kcp_proxy {
let src_proxy = KcpProxySrc::new(self.get_peer_manager()).await;
src_proxy.start().await;
self.kcp_proxy_src = Some(src_proxy);
}
if !self.global_ctx.get_flags().disable_kcp_input {
let mut dst_proxy = KcpProxyDst::new(self.get_peer_manager()).await;
dst_proxy.start().await;
self.kcp_proxy_dst = Some(dst_proxy);
}
self.udp_hole_puncher.lock().await.run().await?;
self.peer_center.init().await;
+1 -1
View File
@@ -33,7 +33,7 @@ pub trait PeerPacketFilter {
#[async_trait::async_trait]
#[auto_impl::auto_impl(Arc)]
pub trait NicPacketFilter {
async fn try_process_packet_from_nic(&self, data: &mut ZCPacket);
async fn try_process_packet_from_nic(&self, data: &mut ZCPacket) -> bool;
}
type BoxPeerPacketFilter = Box<dyn PeerPacketFilter + Send + Sync>;
+31 -14
View File
@@ -675,7 +675,7 @@ impl PeerManager {
async fn run_nic_packet_process_pipeline(&self, data: &mut ZCPacket) {
for pipeline in self.nic_packet_process_pipeline.read().await.iter().rev() {
pipeline.try_process_packet_from_nic(data).await;
let _ = pipeline.try_process_packet_from_nic(data).await;
}
}
@@ -722,13 +722,7 @@ impl PeerManager {
}
}
pub async fn send_msg_ipv4(&self, mut msg: ZCPacket, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
tracing::trace!(
"do send_msg in peer manager, msg: {:?}, ipv4_addr: {}",
msg,
ipv4_addr
);
pub async fn get_msg_dst_peer(&self, ipv4_addr: &Ipv4Addr) -> (Vec<PeerId>, bool) {
let mut is_exit_node = false;
let mut dst_peers = vec![];
let network_length = self
@@ -736,10 +730,10 @@ impl PeerManager {
.get_ipv4()
.map(|x| x.network_length())
.unwrap_or(24);
let ipv4_inet = cidr::Ipv4Inet::new(ipv4_addr, network_length).unwrap();
let ipv4_inet = cidr::Ipv4Inet::new(*ipv4_addr, network_length).unwrap();
if ipv4_addr.is_broadcast()
|| ipv4_addr.is_multicast()
|| ipv4_addr == ipv4_inet.last_address()
|| *ipv4_addr == ipv4_inet.last_address()
{
dst_peers.extend(
self.peers
@@ -760,10 +754,15 @@ impl PeerManager {
}
}
if dst_peers.is_empty() {
tracing::info!("no peer id for ipv4: {}", ipv4_addr);
return Ok(());
}
(dst_peers, is_exit_node)
}
pub async fn send_msg_ipv4(&self, mut msg: ZCPacket, ipv4_addr: Ipv4Addr) -> Result<(), Error> {
tracing::trace!(
"do send_msg in peer manager, msg: {:?}, ipv4_addr: {}",
msg,
ipv4_addr
);
msg.fill_peer_manager_hdr(
self.my_peer_id,
@@ -771,6 +770,24 @@ impl PeerManager {
tunnel::packet_def::PacketType::Data as u8,
);
self.run_nic_packet_process_pipeline(&mut msg).await;
let cur_to_peer_id = msg.peer_manager_header().unwrap().to_peer_id.into();
if cur_to_peer_id != 0 {
return Self::send_msg_internal(
&self.peers,
&self.foreign_network_client,
msg,
cur_to_peer_id,
)
.await;
}
let (dst_peers, is_exit_node) = self.get_msg_dst_peer(&ipv4_addr).await;
if dst_peers.is_empty() {
tracing::info!("no peer id for ipv4: {}", ipv4_addr);
return Ok(());
}
let compressor = DefaultCompressor {};
compressor
.compress(&mut msg, self.data_compress_algo)
+7
View File
@@ -22,6 +22,13 @@ message FlagsInConfig {
bool multi_thread = 15;
CompressionAlgoPb data_compress_algo = 16;
bool bind_device = 17;
// should we convert all tcp streams into kcp streams
bool enable_kcp_proxy = 18;
// does this peer allow kcp input
bool disable_kcp_input = 19;
// allow relay kcp packets (for public server, this can reduce the throughput)
bool disable_relay_kcp = 20;
}
message RpcDescriptor {
+4
View File
@@ -205,3 +205,7 @@ message HandshakeRequest {
string network_name = 5;
bytes network_secret_digrest = 6;
}
message KcpConnData {
common.SocketAddr dst = 4;
}
+6
View File
@@ -61,6 +61,8 @@ pub enum PacketType {
RpcReq = 8,
RpcResp = 9,
ForeignNetworkPacket = 10,
KcpSrc = 11,
KcpDst = 12,
}
bitflags::bitflags! {
@@ -494,6 +496,10 @@ impl ZCPacket {
&self.inner[self.payload_offset()..]
}
pub fn payload_bytes(mut self) -> BytesMut {
self.inner.split_off(self.payload_offset())
}
pub fn peer_manager_header(&self) -> Option<&PeerManagerHeader> {
PeerManagerHeader::ref_from_prefix(
&self.inner[self