Mirror of https://mirror.suhoan.cn/https://github.com/EasyTier/EasyTier.git
Synced 2025-12-15 14:17:24 +08:00
improve direct connector (#685)
* support ipv6 stun
* show interface and public ip in cli node info
* direct conn should keep trying unless already direct connected
* peer should use conn with smallest latency
* deprecate ipv6_listener, use -l instead
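The "peer should use conn with smallest latency" item is not visible in the hunks below; it lives in the peer-connection selection code. As an illustration only, a minimal sketch of that idea, with hypothetical ConnInfo and pick_lowest_latency names, could look like this:

    use std::time::Duration;

    /// Hypothetical connection summary; the real peer code tracks more state.
    struct ConnInfo {
        conn_id: u64,
        latency: Duration,
    }

    /// Prefer the directly connected tunnel with the smallest measured RTT.
    fn pick_lowest_latency(conns: &[ConnInfo]) -> Option<u64> {
        conns.iter().min_by_key(|c| c.latency).map(|c| c.conn_id)
    }
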
@@ -1,7 +1,9 @@
 // try connect peers directly, with either its public ip or lan ip
 use std::{
-    net::SocketAddr,
+    collections::HashSet,
+    net::{Ipv6Addr, SocketAddr},
+    str::FromStr,
     sync::{
         atomic::{AtomicBool, Ordering},
         Arc,
@@ -79,7 +81,6 @@ struct DstListenerUrlBlackListItem(PeerId, url::Url);
 struct DirectConnectorManagerData {
     global_ctx: ArcGlobalCtx,
     peer_manager: Arc<PeerManager>,
-    dst_blacklist: timedmap::TimedMap<DstBlackListItem, ()>,
     dst_listener_blacklist: timedmap::TimedMap<DstListenerUrlBlackListItem, ()>,
 }

@@ -88,7 +89,6 @@ impl DirectConnectorManagerData {
         Self {
             global_ctx,
             peer_manager,
-            dst_blacklist: timedmap::TimedMap::new(),
             dst_listener_blacklist: timedmap::TimedMap::new(),
         }
     }
@@ -150,7 +150,9 @@ impl DirectConnectorManager {
         let peers = data.peer_manager.list_peers().await;
         let mut tasks = JoinSet::new();
         for peer_id in peers {
-            if peer_id == my_peer_id {
+            if peer_id == my_peer_id
+                || data.peer_manager.has_directly_connected_conn(peer_id)
+            {
                 continue;
             }
             tasks.spawn(Self::do_try_direct_connect(data.clone(), peer_id));
@@ -173,24 +175,13 @@ impl DirectConnectorManager {
         dst_peer_id: PeerId,
         addr: String,
     ) -> Result<(), Error> {
-        data.dst_blacklist.cleanup();
-        if data
-            .dst_blacklist
-            .contains(&DstBlackListItem(dst_peer_id.clone(), addr.clone()))
-        {
-            tracing::debug!("try_connect_to_ip failed, addr in blacklist: {}", addr);
-            return Err(Error::UrlInBlacklist);
-        }
-
         let connector = create_connector_by_url(&addr, &data.global_ctx).await?;
         let (peer_id, conn_id) = timeout(
-            std::time::Duration::from_secs(5),
-            data.peer_manager.try_connect(connector),
+            std::time::Duration::from_secs(3),
+            data.peer_manager.try_direct_connect(connector),
         )
         .await??;

-        // let (peer_id, conn_id) = data.peer_manager.try_connect(connector).await?;
-
         if peer_id != dst_peer_id && !TESTING.load(Ordering::Relaxed) {
             tracing::info!(
                 "connect to ip succ: {}, but peer id mismatch, expect: {}, actual: {}",
@@ -204,6 +195,7 @@ impl DirectConnectorManager {
                 .await?;
             return Err(Error::InvalidUrl(addr));
         }
+
         Ok(())
     }

@@ -214,7 +206,7 @@ impl DirectConnectorManager {
         addr: String,
     ) -> Result<(), Error> {
         let mut rand_gen = rand::rngs::OsRng::default();
-        let backoff_ms = vec![1000, 2000, 4000];
+        let backoff_ms = vec![1000, 2000];
         let mut backoff_idx = 0;

         loop {
@@ -237,12 +229,6 @@ impl DirectConnectorManager {
                 backoff_idx += 1;
                 continue;
             } else {
-                data.dst_blacklist.insert(
-                    DstBlackListItem(dst_peer_id.clone(), addr.clone()),
-                    (),
-                    std::time::Duration::from_secs(DIRECT_CONNECTOR_BLACKLIST_TIMEOUT_SEC),
-                );
-
                 return ret;
             }
         }
@@ -273,61 +259,43 @@ impl DirectConnectorManager {

         tracing::debug!(?available_listeners, "got available listeners");

-        let mut listener = available_listeners.get(0).ok_or(anyhow::anyhow!(
-            "peer {} have no valid listener",
-            dst_peer_id
-        ))?;
+        if available_listeners.is_empty() {
+            return Err(anyhow::anyhow!("peer {} have no valid listener", dst_peer_id).into());
+        }

         // if have default listener, use it first
-        listener = available_listeners
+        let listener = available_listeners
             .iter()
             .find(|l| l.scheme() == data.global_ctx.get_flags().default_protocol)
-            .unwrap_or(listener);
+            .unwrap_or(available_listeners.get(0).unwrap());

-        let mut tasks = JoinSet::new();
+        let mut tasks = bounded_join_set::JoinSet::new(2);

         let listener_host = listener.socket_addrs(|| None).unwrap().pop();
         match listener_host {
             Some(SocketAddr::V4(s_addr)) => {
                 if s_addr.ip().is_unspecified() {
-                    ip_list.interface_ipv4s.iter().for_each(|ip| {
-                        let mut addr = (*listener).clone();
-                        if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
-                            tasks.spawn(Self::try_connect_to_ip(
-                                data.clone(),
-                                dst_peer_id.clone(),
-                                addr.to_string(),
-                            ));
-                        } else {
-                            tracing::error!(
-                                ?ip,
-                                ?listener,
-                                ?dst_peer_id,
-                                "failed to set host for interface ipv4"
-                            );
-                        }
-                    });
-
-                    if let Some(public_ipv4) = ip_list.public_ipv4 {
-                        let mut addr = (*listener).clone();
-                        if addr
-                            .set_host(Some(public_ipv4.to_string().as_str()))
-                            .is_ok()
-                        {
-                            tasks.spawn(Self::try_connect_to_ip(
-                                data.clone(),
-                                dst_peer_id.clone(),
-                                addr.to_string(),
-                            ));
-                        } else {
-                            tracing::error!(
-                                ?public_ipv4,
-                                ?listener,
-                                ?dst_peer_id,
-                                "failed to set host for public ipv4"
-                            );
-                        }
-                    }
+                    ip_list
+                        .interface_ipv4s
+                        .iter()
+                        .chain(ip_list.public_ipv4.iter())
+                        .for_each(|ip| {
+                            let mut addr = (*listener).clone();
+                            if addr.set_host(Some(ip.to_string().as_str())).is_ok() {
+                                tasks.spawn(Self::try_connect_to_ip(
+                                    data.clone(),
+                                    dst_peer_id.clone(),
+                                    addr.to_string(),
+                                ));
+                            } else {
+                                tracing::error!(
+                                    ?ip,
+                                    ?listener,
+                                    ?dst_peer_id,
+                                    "failed to set host for interface ipv4"
+                                );
+                            }
+                        });
                 } else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
                     tasks.spawn(Self::try_connect_to_ip(
                         data.clone(),
@@ -338,47 +306,42 @@ impl DirectConnectorManager {
             }
             Some(SocketAddr::V6(s_addr)) => {
                 if s_addr.ip().is_unspecified() {
-                    ip_list.interface_ipv6s.iter().for_each(|ip| {
-                        let mut addr = (*listener).clone();
-                        if addr
-                            .set_host(Some(format!("[{}]", ip.to_string()).as_str()))
-                            .is_ok()
-                        {
-                            tasks.spawn(Self::try_connect_to_ip(
-                                data.clone(),
-                                dst_peer_id.clone(),
-                                addr.to_string(),
-                            ));
-                        } else {
-                            tracing::error!(
-                                ?ip,
-                                ?listener,
-                                ?dst_peer_id,
-                                "failed to set host for interface ipv6"
-                            );
-                        }
-                    });
-
-                    if let Some(public_ipv6) = ip_list.public_ipv6 {
-                        let mut addr = (*listener).clone();
-                        if addr
-                            .set_host(Some(format!("[{}]", public_ipv6.to_string()).as_str()))
-                            .is_ok()
-                        {
-                            tasks.spawn(Self::try_connect_to_ip(
-                                data.clone(),
-                                dst_peer_id.clone(),
-                                addr.to_string(),
-                            ));
-                        } else {
-                            tracing::error!(
-                                ?public_ipv6,
-                                ?listener,
-                                ?dst_peer_id,
-                                "failed to set host for public ipv6"
-                            );
-                        }
-                    }
+                    // for ipv6, only try public ip
+                    ip_list
+                        .interface_ipv6s
+                        .iter()
+                        .chain(ip_list.public_ipv6.iter())
+                        .filter_map(|x| Ipv6Addr::from_str(&x.to_string()).ok())
+                        .filter(|x| {
+                            TESTING.load(Ordering::Relaxed)
+                                || (!x.is_loopback()
+                                    && !x.is_unspecified()
+                                    && !x.is_unique_local()
+                                    && !x.is_unicast_link_local()
+                                    && !x.is_multicast())
+                        })
+                        .collect::<HashSet<_>>()
+                        .iter()
+                        .for_each(|ip| {
+                            let mut addr = (*listener).clone();
+                            if addr
+                                .set_host(Some(format!("[{}]", ip.to_string()).as_str()))
+                                .is_ok()
+                            {
+                                tasks.spawn(Self::try_connect_to_ip(
+                                    data.clone(),
+                                    dst_peer_id.clone(),
+                                    addr.to_string(),
+                                ));
+                            } else {
+                                tracing::error!(
+                                    ?ip,
+                                    ?listener,
+                                    ?dst_peer_id,
+                                    "failed to set host for public ipv6"
+                                );
+                            }
+                        });
                 } else if !s_addr.ip().is_loopback() || TESTING.load(Ordering::Relaxed) {
                     tasks.spawn(Self::try_connect_to_ip(
                         data.clone(),
@@ -430,14 +393,6 @@ impl DirectConnectorManager {
         dst_peer_id: PeerId,
     ) -> Result<(), Error> {
         let peer_manager = data.peer_manager.clone();
-        // check if we have direct connection with dst_peer_id
-        if let Some(c) = peer_manager.list_peer_conns(dst_peer_id).await {
-            // currently if we have any type of direct connection (udp or tcp), we will not try to connect
-            if !c.is_empty() {
-                return Ok(());
-            }
-        }
-
         tracing::debug!("try direct connect to peer: {}", dst_peer_id);

         let rpc_stub = peer_manager
@@ -466,8 +421,7 @@ mod tests {

     use crate::{
         connector::direct::{
-            DirectConnectorManager, DirectConnectorManagerData, DstBlackListItem,
-            DstListenerUrlBlackListItem,
+            DirectConnectorManager, DirectConnectorManagerData, DstListenerUrlBlackListItem,
         },
         instance::listeners::ListenerManager,
         peers::tests::{
@@ -526,9 +480,7 @@ mod tests {
         #[values("tcp", "udp", "wg")] proto: &str,
+        #[values("true", "false")] ipv6: bool,
     ) {
+        if ipv6 && proto != "udp" {
+            return;
+        }
         TESTING.store(true, std::sync::atomic::Ordering::Relaxed);

         let p_a = create_mock_peer_manager().await;
         let p_b = create_mock_peer_manager().await;
@@ -544,14 +496,18 @@ mod tests {
         dm_a.run_as_client();
         dm_c.run_as_server();

         let port = if proto == "wg" { 11040 } else { 11041 };
+        if !ipv6 {
             p_c.get_global_ctx().config.set_listeners(vec![format!(
                 "{}://0.0.0.0:{}",
                 proto, port
             )
             .parse()
             .unwrap()]);
+        } else {
+            p_c.get_global_ctx()
+                .config
+                .set_listeners(vec![format!("{}://[::]:{}", proto, port).parse().unwrap()]);
+        }
         let mut f = p_c.get_global_ctx().config.get_flags();
         f.enable_ipv6 = ipv6;
@@ -592,9 +548,5 @@ mod tests {
             1,
             "tcp://127.0.0.1:10222".parse().unwrap()
         )));
-
-        assert!(data
-            .dst_blacklist
-            .contains(&DstBlackListItem(1, ip_list.listeners[0].to_string())));
     }
 }
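A note on the IPv6 handling added above: candidate addresses are deduplicated and filtered so that only addresses a remote peer could plausibly dial directly are tried. A self-contained sketch of an equivalent filter, written with explicit prefix masks instead of the std helper methods the diff calls (is_unique_local, is_unicast_link_local), under the same reachability assumptions:

    use std::net::Ipv6Addr;

    /// Sketch of the reachability filter used for IPv6 candidates above:
    /// drop loopback, unspecified, unique-local (fc00::/7), link-local
    /// (fe80::/10), and multicast addresses.
    fn is_direct_connect_candidate(ip: &Ipv6Addr) -> bool {
        let seg0 = ip.segments()[0];
        let is_unique_local = (seg0 & 0xfe00) == 0xfc00;
        let is_unicast_link_local = (seg0 & 0xffc0) == 0xfe80;
        !ip.is_loopback()
            && !ip.is_unspecified()
            && !is_unique_local
            && !is_unicast_link_local
            && !ip.is_multicast()
    }
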
@@ -1,13 +1,17 @@
 use std::{net::SocketAddr, sync::Arc};

 use crate::{
-    common::{error::Error, global_ctx::ArcGlobalCtx},
-    tunnel::{Tunnel, TunnelConnector, TunnelError, PROTO_PORT_OFFSET},
+    common::{
+        error::Error,
+        global_ctx::ArcGlobalCtx,
+        stun::{get_default_resolver_config, resolve_txt_record},
+    },
+    tunnel::{IpVersion, Tunnel, TunnelConnector, TunnelError, PROTO_PORT_OFFSET},
 };
 use anyhow::Context;
 use dashmap::DashSet;
 use hickory_resolver::{
-    config::{NameServerConfig, Protocol, ResolverConfig, ResolverOpts},
+    config::{ResolverConfig, ResolverOpts},
     proto::rr::rdata::SRV,
     TokioAsyncResolver,
 };
@@ -38,6 +42,7 @@ pub struct DNSTunnelConnector {
     addr: url::Url,
     bind_addrs: Vec<SocketAddr>,
     global_ctx: ArcGlobalCtx,
+    ip_version: IpVersion,

     default_resolve_config: ResolverConfig,
     default_resolve_opts: ResolverOpts,
@@ -45,21 +50,13 @@ pub struct DNSTunnelConnector {

 impl DNSTunnelConnector {
     pub fn new(addr: url::Url, global_ctx: ArcGlobalCtx) -> Self {
-        let mut default_resolve_config = ResolverConfig::new();
-        default_resolve_config.add_name_server(NameServerConfig::new(
-            "223.5.5.5:53".parse().unwrap(),
-            Protocol::Udp,
-        ));
-        default_resolve_config.add_name_server(NameServerConfig::new(
-            "180.184.1.1:53".parse().unwrap(),
-            Protocol::Udp,
-        ));
         Self {
             addr,
             bind_addrs: Vec::new(),
             global_ctx,
+            ip_version: IpVersion::Both,

-            default_resolve_config,
+            default_resolve_config: get_default_resolver_config(),
             default_resolve_opts: ResolverOpts::default(),
         }
     }
@@ -69,26 +66,14 @@ impl DNSTunnelConnector {
         &self,
         domain_name: &str,
     ) -> Result<Box<dyn TunnelConnector>, Error> {
-        let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap_or(
-            TokioAsyncResolver::tokio(ResolverConfig::default(), ResolverOpts::default()),
-        );
-
-        let response = resolver.txt_lookup(domain_name).await.with_context(|| {
-            format!(
-                "txt_lookup failed, domain_name: {}",
-                domain_name.to_string()
-            )
-        })?;
-
-        let txt_record = response.iter().next().with_context(|| {
-            format!(
-                "no txt record found, domain_name: {}",
-                domain_name.to_string()
-            )
-        })?;
-
-        let txt_data = String::from_utf8_lossy(&txt_record.txt_data()[0]);
-        tracing::info!(?txt_data, ?domain_name, "get txt record");
+        let resolver =
+            TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio(
+                self.default_resolve_config.clone(),
+                self.default_resolve_opts.clone(),
+            ));
+        let txt_data = resolve_txt_record(domain_name, &resolver)
+            .await
+            .with_context(|| format!("resolve txt record failed, domain_name: {}", domain_name))?;

         let candidate_urls = txt_data
             .split(" ")
@@ -106,9 +91,9 @@ impl DNSTunnelConnector {
             )
         })?;

-        let connector = create_connector_by_url(url.as_str(), &self.global_ctx).await;
-
-        connector
+        let mut connector = create_connector_by_url(url.as_str(), &self.global_ctx).await?;
+        connector.set_ip_version(self.ip_version);
+        Ok(connector)
     }

     fn handle_one_srv_record(record: &SRV, protocol: &str) -> Result<(url::Url, u64), Error> {
@@ -141,9 +126,11 @@ impl DNSTunnelConnector {
     ) -> Result<Box<dyn TunnelConnector>, Error> {
         tracing::info!("handle_srv_record: {}", domain_name);

-        let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap_or(
-            TokioAsyncResolver::tokio(ResolverConfig::default(), ResolverOpts::default()),
-        );
+        let resolver =
+            TokioAsyncResolver::tokio_from_system_conf().unwrap_or(TokioAsyncResolver::tokio(
+                self.default_resolve_config.clone(),
+                self.default_resolve_opts.clone(),
+            ));

         let srv_domains = PROTO_PORT_OFFSET
             .iter()
@@ -192,8 +179,9 @@ impl DNSTunnelConnector {
             )
         })?;

-        let connector = create_connector_by_url(url.as_str(), &self.global_ctx).await;
-        connector
+        let mut connector = create_connector_by_url(url.as_str(), &self.global_ctx).await?;
+        connector.set_ip_version(self.ip_version);
+        Ok(connector)
     }
 }

@@ -238,6 +226,10 @@ impl super::TunnelConnector for DNSTunnelConnector {
     fn set_bind_addrs(&mut self, addrs: Vec<SocketAddr>) {
         self.bind_addrs = addrs;
     }
+
+    fn set_ip_version(&mut self, ip_version: IpVersion) {
+        self.ip_version = ip_version;
+    }
 }

 #[cfg(test)]
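The DNS connector now delegates the TXT lookup to resolve_txt_record from common::stun, whose body is not part of this diff. Judging from the inline code it replaces, a hedged sketch of such a helper against the hickory_resolver API used here might look like the following (the name, signature, and error handling are assumptions):

    use hickory_resolver::TokioAsyncResolver;

    /// Sketch only; the real common::stun::resolve_txt_record may differ.
    /// Looks up the TXT record for a domain and returns its text payload.
    async fn resolve_txt_record_sketch(
        domain_name: &str,
        resolver: &TokioAsyncResolver,
    ) -> Result<String, anyhow::Error> {
        let response = resolver.txt_lookup(domain_name).await?;
        let record = response
            .iter()
            .next()
            .ok_or_else(|| anyhow::anyhow!("no txt record found for {}", domain_name))?;
        // A TXT record is a sequence of character strings; join them into one value.
        let txt = record
            .txt_data()
            .iter()
            .map(|part| String::from_utf8_lossy(part).to_string())
            .collect::<Vec<_>>()
            .join("");
        Ok(txt)
    }
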
@@ -293,7 +293,6 @@ impl ManualConnectorManager {
         ip_version: IpVersion,
     ) -> Result<ReconnResult, Error> {
         let ip_collector = data.global_ctx.get_ip_collector();
-        let net_ns = data.net_ns.clone();

         connector.lock().await.set_ip_version(ip_version);

@@ -309,18 +308,11 @@ impl ManualConnectorManager {
         data.global_ctx.issue_event(GlobalCtxEvent::Connecting(
             connector.lock().await.remote_url().clone(),
         ));

-        let _g = net_ns.guard();
-        tracing::info!("reconnect try connect... conn: {:?}", connector);
-        let tunnel = connector.lock().await.connect().await?;
-        tracing::info!("reconnect get tunnel succ: {:?}", tunnel);
-        assert_eq!(
-            dead_url,
-            tunnel.info().unwrap().remote_addr.unwrap().to_string(),
-            "info: {:?}",
-            tunnel.info()
-        );
-        let (peer_id, conn_id) = data.peer_manager.add_client_tunnel(tunnel).await?;
+        let (peer_id, conn_id) = data
+            .peer_manager
+            .try_direct_connect(connector.lock().await.as_mut())
+            .await?;
         tracing::info!("reconnect succ: {} {} {}", peer_id, conn_id, dead_url);
         Ok(ReconnResult {
             dead_url,
@@ -388,7 +388,7 @@ impl UdpHolePunchListener {
                 tracing::warn!(?conn, "udp hole punching listener got peer connection");
                 let peer_mgr = peer_mgr.clone();
                 tokio::spawn(async move {
-                    if let Err(e) = peer_mgr.add_tunnel_as_server(conn).await {
+                    if let Err(e) = peer_mgr.add_tunnel_as_server(conn, false).await {
                         tracing::error!(
                             ?e,
                             "failed to add tunnel as server in hole punch listener"