fix connection loss when traffic is huge

Author: sijie.sun
Date: 2024-09-26 21:59:17 +08:00
Committed by: Sijie.Sun
Parent: 7b4a01e7fb
Commit: 2496cf51c3
6 changed files with 54 additions and 36 deletions

View File

@@ -500,6 +500,12 @@ impl ZCPacket {
     pub fn mut_inner(&mut self) -> &mut BytesMut {
         &mut self.inner
     }
+
+    pub fn is_lossy(&self) -> bool {
+        self.peer_manager_header()
+            .and_then(|hdr| Some(hdr.packet_type == PacketType::Data as u8))
+            .unwrap_or(false)
+    }
 }
 
 #[cfg(test)]
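
Only Data packets report themselves as lossy; a packet whose peer-manager header cannot be parsed, or whose type is anything other than Data, returns false and is treated as must-deliver. A minimal, self-contained sketch of that classification rule (PacketType::Other and the simplified header struct are stand-ins for the real packet_def types, not part of the codebase):

// Sketch of the is_lossy() rule only; types are simplified stand-ins.
#[derive(PartialEq)]
enum PacketType { Data, Other }

struct PeerManagerHeader { packet_type: PacketType }

fn is_lossy(hdr: Option<&PeerManagerHeader>) -> bool {
    // same shape as ZCPacket::is_lossy(): missing header or non-Data type => not lossy
    hdr.map(|h| h.packet_type == PacketType::Data).unwrap_or(false)
}

fn main() {
    assert!(is_lossy(Some(&PeerManagerHeader { packet_type: PacketType::Data })));
    assert!(!is_lossy(Some(&PeerManagerHeader { packet_type: PacketType::Other })));
    assert!(!is_lossy(None)); // unparsable header: never treated as droppable
}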

View File

@@ -26,7 +26,8 @@ use super::{
     StreamItem, Tunnel, TunnelConnector, TunnelError, TunnelInfo, TunnelListener,
 };
 
-static RING_TUNNEL_CAP: usize = 128;
+static RING_TUNNEL_CAP: usize = 64;
+static RING_TUNNEL_RESERVERD_CAP: usize = 4;
 
 type RingLock = parking_lot::Mutex<()>;
@@ -46,7 +47,7 @@ impl RingTunnel {
     pub fn new(cap: usize) -> Self {
         let id = Uuid::new_v4();
-        let ring_impl = AsyncHeapRb::new(cap);
+        let ring_impl = AsyncHeapRb::new(std::cmp::max(RING_TUNNEL_RESERVERD_CAP * 2, cap));
         let (ring_prod_impl, ring_cons_impl) = ring_impl.split();
         Self {
             id: id.clone(),
@@ -121,6 +122,14 @@ impl RingSink {
     }
 
     pub fn try_send(&mut self, item: RingItem) -> Result<(), RingItem> {
+        let base = self.ring_prod_impl.base();
+        if base.occupied_len() >= base.capacity().get() - RING_TUNNEL_RESERVERD_CAP {
+            return Err(item);
+        }
+        self.ring_prod_impl.try_push(item)
+    }
+
+    pub fn force_send(&mut self, item: RingItem) -> Result<(), RingItem> {
         self.ring_prod_impl.try_push(item)
     }
 }
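
try_send() now refuses to use the last RING_TUNNEL_RESERVERD_CAP slots, while force_send() may still fill them, so non-lossy packets keep guaranteed headroom even when data traffic saturates the ring; the max(RING_TUNNEL_RESERVERD_CAP * 2, cap) clamp in RingTunnel::new keeps that reserve from swallowing the whole ring. A small self-contained sketch of the occupancy rule (the constant is copied from the diff; try_send_allowed is a hypothetical stand-in for the real check):

// Sketch of the headroom rule, not the actual RingSink implementation.
const RING_TUNNEL_RESERVERD_CAP: usize = 4;

fn try_send_allowed(occupied: usize, capacity: usize) -> bool {
    // mirrors: occupied_len() >= capacity - RESERVERD_CAP  =>  reject the push
    occupied < capacity - RING_TUNNEL_RESERVERD_CAP
}

fn main() {
    let capacity = 32; // the per-connection ring size used by the UDP connector below
    assert!(try_send_allowed(27, capacity));  // lossy data packets still queue
    assert!(!try_send_allowed(28, capacity)); // last 4 slots stay reserved for force_send
    // the max(RESERVERD_CAP * 2, cap) clamp guarantees capacity >= 8,
    // so the subtraction above cannot underflow
}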

View File

@@ -10,14 +10,14 @@ use std::net::SocketAddr;
 
 use tokio::{
     net::UdpSocket,
     sync::mpsc::{Receiver, Sender, UnboundedReceiver, UnboundedSender},
-    task::{JoinHandle, JoinSet},
+    task::JoinSet,
 };
 use tracing::{instrument, Instrument};
 
 use super::TunnelInfo;
 use crate::{
-    common::join_joinset_background,
+    common::{join_joinset_background, scoped_task::ScopedTask},
     tunnel::{
         build_url_from_socket_addr,
         common::{reserve_buf, TunnelWrapper},
@@ -190,7 +190,7 @@ struct UdpConnection {
     dst_addr: SocketAddr,
     ring_sender: RingSink,
-    forward_task: JoinHandle<()>,
+    forward_task: ScopedTask<()>,
 }
 
 impl UdpConnection {
@@ -209,7 +209,8 @@ impl UdpConnection {
             if let Err(e) = close_event_sender.send((dst_addr, err)) {
                 tracing::error!(?e, "udp send close event error");
             }
-        });
+        })
+        .into();
 
         Self {
             socket,
@@ -232,20 +233,20 @@ impl UdpConnection {
             return Err(TunnelError::ConnIdNotMatch(self.conn_id, conn_id));
         }
 
-        if let Err(e) = self.ring_sender.try_send(zc_packet) {
-            tracing::trace!(?e, "ring sender full, drop packet");
+        if zc_packet.is_lossy() {
+            if let Err(e) = self.ring_sender.try_send(zc_packet) {
+                tracing::trace!(?e, "ring sender full, drop lossy packet");
+            }
+        } else {
+            if let Err(e) = self.ring_sender.force_send(zc_packet) {
+                tracing::trace!(?e, "ring sender full, drop non-lossy packet");
+            }
         }
 
         Ok(())
     }
 }
 
-impl Drop for UdpConnection {
-    fn drop(&mut self) {
-        self.forward_task.abort();
-    }
-}
-
 #[derive(Clone)]
 struct UdpTunnelListenerData {
     local_url: url::Url,
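
The manual Drop impl that called forward_task.abort() is dropped because ScopedTask is expected to abort the wrapped task when it goes out of scope, which is what the `.into()` added after the spawned forward task relies on. The real type lives in common::scoped_task and is not shown in this diff; a minimal sketch of the abort-on-drop wrapper it is assumed to be:

use tokio::task::JoinHandle;

// Assumed shape of common::scoped_task::ScopedTask (sketch, not the actual code):
// a JoinHandle wrapper that cancels the task when its owner is dropped.
pub struct ScopedTask<T>(JoinHandle<T>);

impl<T> From<JoinHandle<T>> for ScopedTask<T> {
    fn from(handle: JoinHandle<T>) -> Self {
        // enables the `.into()` used when constructing UdpConnection
        Self(handle)
    }
}

impl<T> Drop for ScopedTask<T> {
    fn drop(&mut self) {
        // stop the forwarding loop as soon as the connection goes away
        self.0.abort();
    }
}
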
@@ -555,8 +556,8 @@ impl UdpTunnelConnector {
         dst_addr: SocketAddr,
         conn_id: u32,
     ) -> Result<Box<dyn super::Tunnel>, super::TunnelError> {
-        let ring_for_send_udp = Arc::new(RingTunnel::new(128));
-        let ring_for_recv_udp = Arc::new(RingTunnel::new(128));
+        let ring_for_send_udp = Arc::new(RingTunnel::new(32));
+        let ring_for_recv_udp = Arc::new(RingTunnel::new(32));
         tracing::debug!(
             ?ring_for_send_udp,
             ?ring_for_recv_udp,