Mirror of https://mirror.suhoan.cn/https://github.com/EasyTier/EasyTier.git
Synced 2025-12-12 12:47:25 +08:00
fix minor bugs (#936)
1. Update upx to v5.0.1 to avoid a MIPS bug.
2. Use the latest mimalloc crate.
3. Fix a panic in the ospf route implementation.
4. Fix a potential residual peer connection.
@@ -39,11 +39,11 @@ use easytier::{
 windows_service::define_windows_service!(ffi_service_main, win_service_main);
 
 #[cfg(all(feature = "mimalloc", not(feature = "jemalloc")))]
-use mimalloc_rust::GlobalMiMalloc;
+use mimalloc::MiMalloc;
 
 #[cfg(all(feature = "mimalloc", not(feature = "jemalloc")))]
 #[global_allocator]
-static GLOBAL_MIMALLOC: GlobalMiMalloc = GlobalMiMalloc;
+static GLOBAL_MIMALLOC: MiMalloc = MiMalloc;
 
 #[cfg(feature = "jemalloc")]
 use jemalloc_ctl::{epoch, stats, Access as _, AsName as _};
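The old mimalloc-rust crate exposed the allocator as GlobalMiMalloc; the current mimalloc crate exposes it as MiMalloc. Outside EasyTier's feature gating, the standard pattern looks roughly like this (a minimal sketch, not the project's exact setup):

    use mimalloc::MiMalloc;

    // Route every heap allocation in the binary through mimalloc.
    #[global_allocator]
    static GLOBAL: MiMalloc = MiMalloc;

    fn main() {
        let v: Vec<u8> = Vec::with_capacity(1024);
        println!("allocated {} bytes via mimalloc", v.capacity());
    }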
@@ -118,8 +118,14 @@ impl Peer {
     }
 
     pub async fn add_peer_conn(&self, mut conn: PeerConn) {
-        let close_event_sender = self.close_event_sender.clone();
         let close_notifier = conn.get_close_notifier();
+        let conn_info = conn.get_conn_info();
+
+        conn.start_recv_loop(self.packet_recv_chan.clone()).await;
+        conn.start_pingpong();
+        self.conns.insert(conn.get_conn_id(), Arc::new(conn));
+
+        let close_event_sender = self.close_event_sender.clone();
         tokio::spawn(async move {
             let conn_id = close_notifier.get_conn_id();
             if let Some(mut waiter) = close_notifier.get_waiter().await {
@@ -130,12 +136,8 @@ impl Peer {
             }
         });
 
-        conn.start_recv_loop(self.packet_recv_chan.clone()).await;
-        conn.start_pingpong();
-
         self.global_ctx
-            .issue_event(GlobalCtxEvent::PeerConnAdded(conn.get_conn_info()));
-        self.conns.insert(conn.get_conn_id(), Arc::new(conn));
+            .issue_event(GlobalCtxEvent::PeerConnAdded(conn_info));
     }
 
     async fn select_conn(&self) -> Option<ArcPeerConn> {
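These two hunks are the "potential residual conn" fix: the connection now starts its receive loop and is inserted into self.conns before the close-waiter task is spawned, so a fast close event can no longer fire before the map entry exists and leave an entry that is never removed. A minimal sketch of the same ordering with hypothetical types (a plain map and a oneshot channel, not EasyTier's PeerConn):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{oneshot, Mutex};

    type ConnMap = Arc<Mutex<HashMap<u32, &'static str>>>;

    async fn add_conn(conns: ConnMap, conn_id: u32, closed: oneshot::Receiver<()>) {
        // 1. Register the connection first, so a close event always finds it.
        conns.lock().await.insert(conn_id, "conn");

        // 2. Only then spawn the waiter that removes it on close. If the spawn
        //    happened first, a fast close could run before the insert above,
        //    leaving a residual entry in the map.
        let conns_clone = conns.clone();
        tokio::spawn(async move {
            let _ = closed.await;
            conns_clone.lock().await.remove(&conn_id);
        });
    }

    #[tokio::main]
    async fn main() {
        let conns: ConnMap = Arc::new(Mutex::new(HashMap::new()));
        let (tx, rx) = oneshot::channel();
        add_conn(conns.clone(), 1, rx).await;
        drop(tx); // the connection "closes" immediately
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        assert!(conns.lock().await.is_empty());
        println!("no residual conn left behind");
    }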
@@ -423,7 +423,8 @@ impl PeerManager {
         let mut peer = PeerConn::new(self.my_peer_id, self.global_ctx.clone(), tunnel);
         peer.do_handshake_as_server().await?;
         if self.global_ctx.config.get_flags().private_mode
-            && peer.get_network_identity().network_name != self.global_ctx.get_network_identity().network_name
+            && peer.get_network_identity().network_name
+                != self.global_ctx.get_network_identity().network_name
         {
             return Err(Error::SecretKeyError(
                 "private mode is turned on, network identity not match".to_string(),
@@ -1085,7 +1086,8 @@ mod tests {
     use crate::{
         common::{config::Flags, global_ctx::tests::get_mock_global_ctx},
         connector::{
-            create_connector_by_url, udp_hole_punch::tests::create_mock_peer_manager_with_mock_stun,
+            create_connector_by_url, direct::PeerManagerForDirectConnector,
+            udp_hole_punch::tests::create_mock_peer_manager_with_mock_stun,
         },
         instance::listeners::get_listener_by_url,
         peers::{
@@ -1096,7 +1098,12 @@ mod tests {
            tests::{connect_peer_manager, wait_route_appear, wait_route_appear_with_cost},
         },
         proto::common::{CompressionAlgoPb, NatType, PeerFeatureFlag},
-        tunnel::{common::tests::wait_for_condition, TunnelConnector, TunnelListener},
+        tunnel::{
+            common::tests::wait_for_condition,
+            filter::{tests::DropSendTunnelFilter, TunnelWithFilter},
+            ring::create_ring_tunnel_pair,
+            TunnelConnector, TunnelListener,
+        },
     };
 
     use super::PeerManager;
@@ -1336,4 +1343,36 @@ mod tests {
         .await;
         assert_eq!(ret, Some(peer_mgr_b.my_peer_id));
     }
+
+    #[tokio::test]
+    async fn test_client_inbound_blackhole() {
+        let peer_mgr_a = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
+        let peer_mgr_b = create_mock_peer_manager_with_mock_stun(NatType::Unknown).await;
+
+        // a is client, b is server
+
+        let (a_ring, b_ring) = create_ring_tunnel_pair();
+        let a_ring = Box::new(TunnelWithFilter::new(
+            a_ring,
+            DropSendTunnelFilter::new(2, 50000),
+        ));
+
+        let a_mgr_copy = peer_mgr_a.clone();
+        tokio::spawn(async move {
+            a_mgr_copy.add_client_tunnel(a_ring).await.unwrap();
+        });
+        let b_mgr_copy = peer_mgr_b.clone();
+        tokio::spawn(async move {
+            b_mgr_copy.add_tunnel_as_server(b_ring, true).await.unwrap();
+        });
+
+        wait_for_condition(
+            || async {
+                let peers = peer_mgr_a.list_peers().await;
+                peers.is_empty()
+            },
+            Duration::from_secs(10),
+        )
+        .await;
+    }
 }
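The new test relies on wait_for_condition to poll until peer_mgr_a's peer list drains or a 10-second deadline passes. Assuming those retry-until-true-or-timeout semantics, a generic stand-in for such a helper could look like this (hypothetical poll_until, not the crate's actual test helper):

    use std::{future::Future, time::Duration};

    // Repeatedly evaluate an async predicate until it returns true or the
    // timeout elapses; report whether the condition was ever met.
    async fn poll_until<F, Fut>(mut cond: F, timeout: Duration) -> bool
    where
        F: FnMut() -> Fut,
        Fut: Future<Output = bool>,
    {
        let deadline = tokio::time::Instant::now() + timeout;
        while tokio::time::Instant::now() < deadline {
            if cond().await {
                return true;
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
        false
    }

    #[tokio::main]
    async fn main() {
        // e.g. wait until some shared state drains, as the blackhole test
        // does with peer_mgr_a.list_peers().
        let ok = poll_until(|| async { true }, Duration::from_secs(10)).await;
        println!("condition met: {ok}");
    }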
@@ -1315,7 +1315,9 @@ impl PeerRouteServiceImpl {
 
         let all_peer_ids = &conn_bitmap.peer_ids;
         for (peer_idx, (peer_id, _)) in all_peer_ids.iter().enumerate() {
-            let connected = self.synced_route_info.conn_map.get(peer_id).unwrap();
+            let Some(connected) = self.synced_route_info.conn_map.get(peer_id) else {
+                continue;
+            };
 
             for (idx, (other_peer_id, _)) in all_peer_ids.iter().enumerate() {
                 if connected.0.contains(other_peer_id) {
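This is the "fix panic in ospf route" item: a peer referenced by the route table may have no conn_map entry yet, and the old unwrap() panicked in that case, while the let-else simply skips it. A minimal sketch of the pattern with a plain HashMap (hypothetical names, not the synced_route_info types):

    use std::collections::HashMap;

    fn count_known_links(conn_map: &HashMap<u32, Vec<u32>>, peer_ids: &[u32]) -> usize {
        let mut links = 0;
        for peer_id in peer_ids {
            // Before: conn_map.get(peer_id).unwrap() panics if the route table
            // mentions a peer we have no connection info for yet.
            let Some(connected) = conn_map.get(peer_id) else {
                continue; // after: skip peers without an entry instead of panicking
            };
            links += connected.len();
        }
        links
    }

    fn main() {
        let mut conn_map = HashMap::new();
        conn_map.insert(1u32, vec![2, 3]);
        // peer 9 is referenced but has no conn_map entry; it is skipped, no panic.
        println!("{}", count_known_links(&conn_map, &[1, 9]));
    }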