fix self peer route info not existing when starting (#595)
@@ -140,6 +140,12 @@ core_clap:
   bind_device:
     en: "bind the connector socket to physical devices to avoid routing issues. e.g.: subnet proxy segment conflicts with a node's segment, after binding the physical device, it can communicate with the node normally."
     zh-CN: "将连接器的套接字绑定到物理设备以避免路由问题。比如子网代理网段与某节点的网段冲突,绑定物理设备后可以与该节点正常通信。"
+  enable_kcp_proxy:
+    en: "proxy tcp streams with kcp, improving the latency and throughput on the network with udp packet loss."
+    zh-CN: "使用 KCP 代理 TCP 流,提高在 UDP 丢包网络上的延迟和吞吐量。"
+  disable-kcp-input:
+    en: "do not allow other nodes to use kcp to proxy tcp streams to this node. when a node with kcp proxy enabled accesses this node, the original tcp connection is preserved."
+    zh-CN: "不允许其他节点使用 KCP 代理 TCP 流到此节点。开启 KCP 代理的节点访问此节点时,依然使用原始 TCP 连接。"
 
 core_app:
   panic_backtrace_save:
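The two new locale entries above describe KCP-related switches. For orientation only, here is a minimal Rust sketch of how boolean flags with help text like these are typically declared with clap's derive API (clap 4 assumed); the struct and field names are illustrative and are not EasyTier's actual CLI definition.

// Minimal sketch, not EasyTier's real CLI: two boolean switches whose doc
// comments become the --help text, mirroring the locale strings above.
use clap::Parser;

#[derive(Parser, Debug)]
struct DemoCli {
    /// proxy tcp streams with kcp, improving latency and throughput on lossy udp networks
    #[arg(long)]
    enable_kcp_proxy: bool,

    /// do not allow other nodes to use kcp to proxy tcp streams to this node
    #[arg(long)]
    disable_kcp_input: bool,
}

fn main() {
    let cli = DemoCli::parse();
    println!(
        "kcp proxy enabled: {}, kcp input disabled: {}",
        cli.enable_kcp_proxy, cli.disable_kcp_input
    );
}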
@@ -562,6 +562,7 @@ impl TryFrom<&Cli> for TomlConfigLoader {
             f.relay_network_whitelist = wl.join(" ");
         }
         f.disable_p2p = cli.disable_p2p;
+        f.disable_udp_hole_punching = cli.disable_udp_hole_punching;
         f.relay_all_peer_rpc = cli.relay_all_peer_rpc;
         if let Some(ipv6_listener) = cli.ipv6_listener.as_ref() {
             f.ipv6_listener = ipv6_listener
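This hunk forwards the new disable_udp_hole_punching CLI switch into the flags struct consumed by the TOML config loader. A small sketch of that mapping pattern with stand-in types (DemoCli and DemoFlags are illustrative, not EasyTier's real Cli or Flags):

// Illustrative sketch only: each boolean CLI switch is copied verbatim onto
// the flags struct, exactly like the line added in the hunk above.
#[derive(Default)]
struct DemoCli {
    disable_p2p: bool,
    disable_udp_hole_punching: bool,
    relay_all_peer_rpc: bool,
}

#[derive(Default, Debug)]
struct DemoFlags {
    disable_p2p: bool,
    disable_udp_hole_punching: bool,
    relay_all_peer_rpc: bool,
}

impl From<&DemoCli> for DemoFlags {
    fn from(cli: &DemoCli) -> Self {
        DemoFlags {
            disable_p2p: cli.disable_p2p,
            disable_udp_hole_punching: cli.disable_udp_hole_punching,
            relay_all_peer_rpc: cli.relay_all_peer_rpc,
        }
    }
}

fn main() {
    let cli = DemoCli {
        disable_udp_hole_punching: true,
        ..Default::default()
    };
    println!("{:?}", DemoFlags::from(&cli));
}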
@@ -391,6 +391,13 @@ impl SyncedRouteInfo {
             &route_info,
         )?;
 
+        let peer_id_raw = raw_route_info
+            .get_field_by_name("peer_id")
+            .unwrap()
+            .as_u32()
+            .unwrap();
+        assert_eq!(peer_id_raw, route_info.peer_id);
+
         // time between peers may not be synchronized, so update last_update to local now.
         // note only last_update with larger version will be updated to local saved peer info.
         route_info.last_update = Some(SystemTime::now().into());
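The added lines re-check that the peer_id carried in the raw route message matches the decoded route_info before the entry is accepted, and the surrounding context stamps last_update with local time because peer clocks may drift. A self-contained sketch of the same two steps, using a stand-in RoutePeerInfo type instead of the project's protobuf-generated one and a plain u32 in place of the reflective field lookup:

use std::time::SystemTime;

// Stand-in types; the real code reads "peer_id" reflectively from the raw message.
#[derive(Debug)]
struct RoutePeerInfo {
    peer_id: u32,
    last_update: Option<SystemTime>,
}

fn normalize_route_info(raw_peer_id: u32, mut info: RoutePeerInfo) -> RoutePeerInfo {
    // Same intent as the assert_eq! in the hunk: fail loudly on a mismatch.
    assert_eq!(raw_peer_id, info.peer_id, "route info peer_id mismatch");
    // Remote timestamps are not trusted; record the local receive time instead.
    info.last_update = Some(SystemTime::now());
    info
}

fn main() {
    let info = RoutePeerInfo { peer_id: 7, last_update: None };
    let info = normalize_route_info(7, info);
    println!("{:?}", info);
}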
@@ -1644,8 +1651,6 @@ impl RouteSessionManager {
     ) {
         let mut last_sync = Instant::now();
         loop {
-            let mut first_time = true;
-
             loop {
                 let Some(service_impl) = service_impl.clone().upgrade() else {
                     return;
@@ -1655,11 +1660,6 @@ impl RouteSessionManager {
                     return;
                 };
 
-                if first_time {
-                    first_time = false;
-                    service_impl.update_my_infos().await;
-                }
-
                 // if we are initiator, we should ensure the dst has the session.
                 let sync_as_initiator = if last_sync.elapsed().as_secs() > 10 {
                     last_sync = Instant::now();
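Together with the previous hunk, this removes the lazy first_time/update_my_infos() refresh from the sync loop; populating the node's own info now happens at PeerRoute start-up instead (next hunk). What remains is the loop skeleton that exits once the service is dropped and throttles initiator-side work to every 10 seconds. A synchronous stand-in sketch of that skeleton (the types, the sleep interval, and the blocking style are assumptions, not the real async task):

use std::sync::{Arc, Weak};
use std::time::{Duration, Instant};

struct ServiceImpl;

impl ServiceImpl {
    fn sync_once(&self) {
        println!("sync round");
    }
}

fn run_session_loop(service_impl: Weak<ServiceImpl>) {
    let mut last_sync = Instant::now();
    loop {
        // Mirrors `let Some(service_impl) = service_impl.clone().upgrade() else { return; }`:
        // the task only holds a Weak handle and exits once the service is gone.
        let Some(service) = service_impl.upgrade() else {
            return;
        };
        // Mirrors `last_sync.elapsed().as_secs() > 10`: do the heavier
        // initiator-side sync at most once every 10 seconds.
        if last_sync.elapsed().as_secs() > 10 {
            last_sync = Instant::now();
            service.sync_once();
        }
        drop(service);
        std::thread::sleep(Duration::from_millis(100));
    }
}

fn main() {
    let service = Arc::new(ServiceImpl);
    let weak = Arc::downgrade(&service);
    drop(service); // once the service is gone, the loop exits on its next round
    run_session_loop(weak);
    println!("session loop exited cleanly");
}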
@@ -2023,6 +2023,9 @@ impl PeerRoute {
             return;
         };
 
+        // make sure my_peer_id is in the peer_infos.
+        self.service_impl.update_my_infos().await;
+
         peer_rpc.rpc_server().registry().register(
             OspfRouteRpcServer::new(self.session_mgr.clone()),
             &self.global_ctx.get_network_name(),
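This is the change the commit title refers to: the node's own route info is inserted into peer_infos before the OSPF route RPC server is registered, so no peer can query the service while the "self" entry does not exist yet. A stand-in sketch of that start-up ordering (RouteService and its methods are illustrative, not EasyTier's types):

use std::collections::HashMap;
use std::sync::Mutex;

struct RouteService {
    my_peer_id: u32,
    peer_infos: Mutex<HashMap<u32, String>>,
}

impl RouteService {
    fn update_my_infos(&self) {
        // Make sure my_peer_id is in the peer_infos (same intent as the hunk).
        self.peer_infos
            .lock()
            .unwrap()
            .insert(self.my_peer_id, "self".to_string());
    }

    fn register_rpc(&self) {
        // Stand-in for registering the RPC server; by this point the self
        // entry is guaranteed to exist.
        assert!(
            self.peer_infos.lock().unwrap().contains_key(&self.my_peer_id),
            "self route info must exist before serving RPC"
        );
        println!("rpc registered for peer {}", self.my_peer_id);
    }
}

fn main() {
    let svc = RouteService {
        my_peer_id: 1,
        peer_infos: Mutex::new(HashMap::new()),
    };
    svc.update_my_infos();
    svc.register_rpc();
}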
@@ -529,7 +529,7 @@ pub async fn proxy_three_node_disconnect_test(#[values("tcp", "wg")] proto: &str
         // 0 down, assume last packet is recv in -0.01
         // [2, 7) send ping
         // [4, 9) ping fail and close connection
-        Duration::from_millis(9300),
+        Duration::from_secs(11),
     )
     .await;
     set_link_status("net_d", true);
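The test now waits 11 seconds instead of 9.3, leaving headroom beyond the [4, 9) second window in which the ping fails and the connection closes. Purely as an illustration of the kind of bounded wait such a Duration feeds, here is a hypothetical wait_for helper built on tokio (the real test helper and its signature are not shown in this diff):

use std::time::Duration;

// Hypothetical helper: give an async condition up to `timeout` to become true,
// polling every 100 ms.
async fn wait_for<F: Fn() -> bool>(cond: F, timeout: Duration) -> bool {
    let start = tokio::time::Instant::now();
    while start.elapsed() < timeout {
        if cond() {
            return true;
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
    false
}

#[tokio::main]
async fn main() {
    // With the window in the comments above, 11 seconds leaves a margin.
    let ok = wait_for(|| true, Duration::from_secs(11)).await;
    println!("condition met: {ok}");
}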