fix peer center for latency report

@@ -117,16 +117,8 @@ service ConnectorManageRpc {
   rpc ManageConnector (ManageConnectorRequest) returns (ManageConnectorResponse);
 }
 
-enum LatencyLevel {
-  VeryLow = 0;
-  Low = 1;
-  Normal = 2;
-  High = 3;
-  VeryHigh = 4;
-}
-
 message DirectConnectedPeerInfo {
-  LatencyLevel latency_level = 2;
+  int32 latency_ms = 1;
 }
 
 message PeerInfoForGlobalMap {

@@ -331,13 +331,7 @@ async fn main() -> Result<(), Error> {
         let direct_peers = v
             .direct_peers
             .iter()
-            .map(|(k, v)| {
-                format!(
-                    "{}:{:?}",
-                    k,
-                    LatencyLevel::try_from(v.latency_level).unwrap()
-                )
-            })
+            .map(|(k, v)| format!("{}: {:?}ms", k, v.latency_ms,))
             .collect::<Vec<_>>();
         table_rows.push(PeerCenterTableItem {
             node_id: node_id.to_string(),

@@ -1,24 +1,23 @@
 use std::{
-    collections::hash_map::DefaultHasher,
-    hash::{Hash, Hasher},
-    sync::{
-        atomic::{AtomicBool, Ordering},
-        Arc,
-    },
-    time::{Duration, SystemTime},
+    collections::BTreeSet,
+    sync::Arc,
+    time::{Duration, Instant, SystemTime},
 };
 
 use crossbeam::atomic::AtomicCell;
 use futures::Future;
-use tokio::{
-    sync::{Mutex, RwLock},
-    task::JoinSet,
-};
+use std::sync::RwLock;
+use tokio::sync::Mutex;
+use tokio::task::JoinSet;
 use tracing::Instrument;
 
 use crate::{
     common::PeerId,
-    peers::{peer_manager::PeerManager, rpc_service::PeerManagerRpcService},
+    peers::{
+        peer_manager::PeerManager,
+        route_trait::{RouteCostCalculator, RouteCostCalculatorInterface},
+        rpc_service::PeerManagerRpcService,
+    },
     rpc::{GetGlobalPeerMapRequest, GetGlobalPeerMapResponse},
 };
 

@@ -34,7 +33,8 @@ struct PeerCenterBase {
     lock: Arc<Mutex<()>>,
 }
 
-static SERVICE_ID: u32 = 5;
+// static SERVICE_ID: u32 = 5; for compatibility with the original code
+static SERVICE_ID: u32 = 50;
 
 struct PeridicJobCtx<T> {
     peer_mgr: Arc<PeerManager>,

@@ -132,7 +132,7 @@ impl PeerCenterBase {
 
 pub struct PeerCenterInstanceService {
     global_peer_map: Arc<RwLock<GlobalPeerMap>>,
-    global_peer_map_digest: Arc<RwLock<Digest>>,
+    global_peer_map_digest: Arc<AtomicCell<Digest>>,
 }
 
 #[tonic::async_trait]
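
The hunks around this point replace Arc<RwLock<Digest>> with Arc<AtomicCell<Digest>>. Because the digest is a small Copy value, crossbeam's AtomicCell offers lock-free load/store with no .await points. A minimal stand-alone sketch of the swap, assuming the crossbeam crate and that Digest is an alias for u64 (which the hasher.finish() calls later in this commit suggest):

use crossbeam::atomic::AtomicCell;

type Digest = u64; // assumption: mirrors the crate's Digest alias

fn main() {
    // AtomicCell gives lock-free load/store for Copy types, so readers
    // and writers never block or hold a guard across await points.
    let digest: AtomicCell<Digest> = AtomicCell::new(0);
    digest.store(42);
    assert_eq!(digest.load(), 42);
}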

@@ -141,7 +141,7 @@ impl crate::rpc::cli::peer_center_rpc_server::PeerCenterRpc for PeerCenterInstan
         &self,
         _request: tonic::Request<GetGlobalPeerMapRequest>,
     ) -> Result<tonic::Response<GetGlobalPeerMapResponse>, tonic::Status> {
-        let global_peer_map = self.global_peer_map.read().await.clone();
+        let global_peer_map = self.global_peer_map.read().unwrap().clone();
         Ok(tonic::Response::new(GetGlobalPeerMapResponse {
             global_peer_map: global_peer_map
                 .map

@@ -157,7 +157,8 @@ pub struct PeerCenterInstance {
 
     client: Arc<PeerCenterBase>,
     global_peer_map: Arc<RwLock<GlobalPeerMap>>,
-    global_peer_map_digest: Arc<RwLock<Digest>>,
+    global_peer_map_digest: Arc<AtomicCell<Digest>>,
+    global_peer_map_update_time: Arc<AtomicCell<Instant>>,
 }
 
 impl PeerCenterInstance {

@@ -166,7 +167,8 @@ impl PeerCenterInstance {
             peer_mgr: peer_mgr.clone(),
             client: Arc::new(PeerCenterBase::new(peer_mgr.clone())),
             global_peer_map: Arc::new(RwLock::new(GlobalPeerMap::new())),
-            global_peer_map_digest: Arc::new(RwLock::new(Digest::default())),
+            global_peer_map_digest: Arc::new(AtomicCell::new(Digest::default())),
+            global_peer_map_update_time: Arc::new(AtomicCell::new(Instant::now())),
         }
     }
 

@@ -179,12 +181,14 @@ impl PeerCenterInstance {
     async fn init_get_global_info_job(&self) {
         struct Ctx {
             global_peer_map: Arc<RwLock<GlobalPeerMap>>,
-            global_peer_map_digest: Arc<RwLock<Digest>>,
+            global_peer_map_digest: Arc<AtomicCell<Digest>>,
+            global_peer_map_update_time: Arc<AtomicCell<Instant>>,
         }
 
         let ctx = Arc::new(Ctx {
             global_peer_map: self.global_peer_map.clone(),
             global_peer_map_digest: self.global_peer_map_digest.clone(),
+            global_peer_map_update_time: self.global_peer_map_update_time.clone(),
         });
 
         self.client

@@ -193,10 +197,7 @@ impl PeerCenterInstance {
                 rpc_ctx.deadline = SystemTime::now() + Duration::from_secs(3);
 
                 let ret = client
-                    .get_global_peer_map(
-                        rpc_ctx,
-                        ctx.job_ctx.global_peer_map_digest.read().await.clone(),
-                    )
+                    .get_global_peer_map(rpc_ctx, ctx.job_ctx.global_peer_map_digest.load())
                     .await?;
 
                 let Ok(resp) = ret else {

@@ -217,10 +218,13 @@ impl PeerCenterInstance {
                     resp.digest
                 );
 
-                *ctx.job_ctx.global_peer_map.write().await = resp.global_peer_map;
-                *ctx.job_ctx.global_peer_map_digest.write().await = resp.digest;
+                *ctx.job_ctx.global_peer_map.write().unwrap() = resp.global_peer_map;
+                ctx.job_ctx.global_peer_map_digest.store(resp.digest);
+                ctx.job_ctx
+                    .global_peer_map_update_time
+                    .store(Instant::now());
 
-                Ok(10000)
+                Ok(5000)
             })
             .await;
     }

@@ -228,67 +232,53 @@ impl PeerCenterInstance {
     async fn init_report_peers_job(&self) {
         struct Ctx {
             service: PeerManagerRpcService,
-            need_send_peers: AtomicBool,
-            last_report_peers: Mutex<PeerInfoForGlobalMap>,
+            last_report_peers: Mutex<BTreeSet<PeerId>>,
+
             last_center_peer: AtomicCell<PeerId>,
+            last_report_time: AtomicCell<Instant>,
         }
         let ctx = Arc::new(Ctx {
             service: PeerManagerRpcService::new(self.peer_mgr.clone()),
-            need_send_peers: AtomicBool::new(true),
-            last_report_peers: Mutex::new(PeerInfoForGlobalMap::default()),
+            last_report_peers: Mutex::new(BTreeSet::new()),
             last_center_peer: AtomicCell::new(PeerId::default()),
+            last_report_time: AtomicCell::new(Instant::now()),
         });
 
         self.client
             .init_periodic_job(ctx, |client, ctx| async move {
                 let my_node_id = ctx.peer_mgr.my_peer_id();
+                let peers: PeerInfoForGlobalMap = ctx.job_ctx.service.list_peers().await.into();
+                let peer_list = peers.direct_peers.keys().map(|k| *k).collect();
+                let job_ctx = &ctx.job_ctx;
 
-                // if peers are not same in next 10 seconds, report peers to center server
-                let mut peers = PeerInfoForGlobalMap::default();
-                for _ in 1..10 {
-                    peers = ctx.job_ctx.service.list_peers().await.into();
-                    if ctx.center_peer.load() != ctx.job_ctx.last_center_peer.load() {
-                        // if center peer changed, report peers immediately
-                        break;
-                    }
-                    if peers == *ctx.job_ctx.last_report_peers.lock().await {
-                        return Ok(3000);
-                    }
-                    tokio::time::sleep(Duration::from_secs(2)).await;
+                // only report when:
+                // 1. center peer changed
+                // 2. last report time is more than 60 seconds
+                // 3. peers changed
+                if ctx.center_peer.load() == ctx.job_ctx.last_center_peer.load()
+                    && job_ctx.last_report_time.load().elapsed().as_secs() < 60
+                    && *job_ctx.last_report_peers.lock().await == peer_list
+                {
+                    return Ok(5000);
                 }
 
-                *ctx.job_ctx.last_report_peers.lock().await = peers.clone();
-                let mut hasher = DefaultHasher::new();
-                peers.hash(&mut hasher);
-
-                let peers = if ctx.job_ctx.need_send_peers.load(Ordering::Relaxed) {
-                    Some(peers)
-                } else {
-                    None
-                };
                 let mut rpc_ctx = tarpc::context::current();
                 rpc_ctx.deadline = SystemTime::now() + Duration::from_secs(3);
 
                 let ret = client
-                    .report_peers(
-                        rpc_ctx,
-                        my_node_id.clone(),
-                        peers,
-                        hasher.finish() as Digest,
-                    )
+                    .report_peers(rpc_ctx, my_node_id.clone(), peers)
                    .await?;
 
-                if matches!(ret.as_ref().err(), Some(Error::DigestMismatch)) {
-                    ctx.job_ctx.need_send_peers.store(true, Ordering::Relaxed);
-                    return Ok(0);
-                } else if ret.is_err() {
+                if ret.is_ok() {
+                    ctx.job_ctx.last_center_peer.store(ctx.center_peer.load());
+                    *ctx.job_ctx.last_report_peers.lock().await = peer_list;
+                    ctx.job_ctx.last_report_time.store(Instant::now());
+                } else {
                     tracing::error!("report peers to center server got error result: {:?}", ret);
-                    return Ok(500);
                 }
 
-                ctx.job_ctx.last_center_peer.store(ctx.center_peer.load());
-                ctx.job_ctx.need_send_peers.store(false, Ordering::Relaxed);
-                Ok(3000)
+                Ok(5000)
             })
             .await;
     }
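
The rewritten report job above drops the digest handshake in favor of three cheap local checks before talking to the center server. A stand-alone sketch of that throttling rule; should_report and the u32 peer ids are hypothetical names for illustration:

use std::collections::BTreeSet;
use std::time::Instant;

// Report only when the center peer changed, the last successful report
// is older than 60 seconds, or the set of direct peers changed.
fn should_report(
    center_changed: bool,
    last_report: Instant,
    last_peers: &BTreeSet<u32>,
    current_peers: &BTreeSet<u32>,
) -> bool {
    center_changed
        || last_report.elapsed().as_secs() >= 60
        || last_peers != current_peers
}

fn main() {
    let peers: BTreeSet<u32> = [1, 2, 3].into();
    // peer set changed -> report
    assert!(should_report(false, Instant::now(), &BTreeSet::new(), &peers));
    // nothing changed and reported just now -> skip
    assert!(!should_report(false, Instant::now(), &peers, &peers.clone()));
}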

@@ -299,15 +289,62 @@ impl PeerCenterInstance {
             global_peer_map_digest: self.global_peer_map_digest.clone(),
         }
     }
+
+    pub fn get_cost_calculator(&self) -> RouteCostCalculator {
+        struct RouteCostCalculatorImpl {
+            global_peer_map: Arc<RwLock<GlobalPeerMap>>,
+
+            global_peer_map_clone: GlobalPeerMap,
+
+            last_update_time: AtomicCell<Instant>,
+            global_peer_map_update_time: Arc<AtomicCell<Instant>>,
+        }
+
+        impl RouteCostCalculatorInterface for RouteCostCalculatorImpl {
+            fn calculate_cost(&self, src: PeerId, dst: PeerId) -> i32 {
+                let ret = self
+                    .global_peer_map_clone
+                    .map
+                    .get(&src)
+                    .and_then(|src_peer_info| src_peer_info.direct_peers.get(&dst))
+                    .and_then(|info| Some(info.latency_ms));
+                ret.unwrap_or(i32::MAX)
+            }
+
+            fn begin_update(&mut self) {
+                let global_peer_map = self.global_peer_map.read().unwrap();
+                self.global_peer_map_clone = global_peer_map.clone();
+            }
+
+            fn end_update(&mut self) {
+                self.last_update_time
+                    .store(self.global_peer_map_update_time.load());
+            }
+
+            fn need_update(&self) -> bool {
+                self.last_update_time.load() < self.global_peer_map_update_time.load()
+            }
+        }
+
+        Box::new(RouteCostCalculatorImpl {
+            global_peer_map: self.global_peer_map.clone(),
+            global_peer_map_clone: GlobalPeerMap::new(),
+            last_update_time: AtomicCell::new(
+                self.global_peer_map_update_time.load() - Duration::from_secs(1),
+            ),
+            global_peer_map_update_time: self.global_peer_map_update_time.clone(),
+        })
+    }
 }
 
 #[cfg(test)]
 mod tests {
-    use std::ops::Deref;
-
     use crate::{
         peer_center::server::get_global_data,
-        peers::tests::{connect_peer_manager, create_mock_peer_manager, wait_route_appear},
+        peers::tests::{
+            connect_peer_manager, create_mock_peer_manager, wait_for_condition, wait_route_appear,
+        },
+        tunnel::common::tests::enable_log,
     };
 
     use super::*;
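
RouteCostCalculatorImpl above clones the shared map once per update round, so the hot calculate_cost() path reads a private snapshot and takes no lock. A minimal sketch of the same snapshot pattern with standard-library types only; CostCalc is a hypothetical name, and the real type also tracks update timestamps to answer need_update():

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

struct CostCalc {
    shared: Arc<RwLock<HashMap<(u32, u32), i32>>>, // (src, dst) -> latency_ms
    snapshot: HashMap<(u32, u32), i32>,
}

impl CostCalc {
    // Take the read lock exactly once and copy the map.
    fn begin_update(&mut self) {
        self.snapshot = self.shared.read().unwrap().clone();
    }
    // Lock-free lookups against the private snapshot; unknown edges
    // are treated as unreachable.
    fn calculate_cost(&self, src: u32, dst: u32) -> i32 {
        self.snapshot.get(&(src, dst)).copied().unwrap_or(i32::MAX)
    }
}

fn main() {
    let shared = Arc::new(RwLock::new(HashMap::new()));
    shared.write().unwrap().insert((1, 2), 15);
    let mut calc = CostCalc { shared: shared.clone(), snapshot: HashMap::new() };
    calc.begin_update();
    assert_eq!(calc.calculate_cost(1, 2), 15);
    assert_eq!(calc.calculate_cost(2, 1), i32::MAX);
}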

@@ -340,43 +377,64 @@ mod tests {
         let center_data = get_global_data(center_peer);
 
         // wait center_data has 3 records for 10 seconds
-        let now = std::time::Instant::now();
-        loop {
-            if center_data.read().await.global_peer_map.map.len() == 3 {
-                println!(
-                    "center data ready, {:#?}",
-                    center_data.read().await.global_peer_map
-                );
-                break;
-            }
-            if now.elapsed().as_secs() > 60 {
-                panic!("center data not ready");
-            }
-            tokio::time::sleep(Duration::from_millis(100)).await;
-        }
+        wait_for_condition(
+            || async {
+                if center_data.global_peer_map.len() == 4 {
+                    println!("center data {:#?}", center_data.global_peer_map);
+                    true
+                } else {
+                    false
+                }
+            },
+            Duration::from_secs(10),
+        )
+        .await;
 
         let mut digest = None;
         for pc in peer_centers.iter() {
             let rpc_service = pc.get_rpc_service();
-            let now = std::time::Instant::now();
-            while now.elapsed().as_secs() < 10 {
-                if rpc_service.global_peer_map.read().await.map.len() == 3 {
-                    break;
-                }
-                tokio::time::sleep(Duration::from_millis(100)).await;
-            }
-            assert_eq!(rpc_service.global_peer_map.read().await.map.len(), 3);
+            wait_for_condition(
+                || async { rpc_service.global_peer_map.read().unwrap().map.len() == 3 },
+                Duration::from_secs(10),
+            )
+            .await;
             println!("rpc service ready, {:#?}", rpc_service.global_peer_map);
 
             if digest.is_none() {
-                digest = Some(rpc_service.global_peer_map_digest.read().await.clone());
+                digest = Some(rpc_service.global_peer_map_digest.load());
             } else {
-                let v = rpc_service.global_peer_map_digest.read().await;
-                assert_eq!(digest.as_ref().unwrap(), v.deref());
+                let v = rpc_service.global_peer_map_digest.load();
+                assert_eq!(digest.unwrap(), v);
             }
+
+            let mut route_cost = pc.get_cost_calculator();
+            assert!(route_cost.need_update());
+
+            route_cost.begin_update();
+            assert!(
+                route_cost.calculate_cost(peer_mgr_a.my_peer_id(), peer_mgr_b.my_peer_id()) < 30
+            );
+            assert!(
+                route_cost.calculate_cost(peer_mgr_b.my_peer_id(), peer_mgr_a.my_peer_id()) < 30
+            );
+            assert!(
+                route_cost.calculate_cost(peer_mgr_b.my_peer_id(), peer_mgr_c.my_peer_id()) < 30
+            );
+            assert!(
+                route_cost.calculate_cost(peer_mgr_c.my_peer_id(), peer_mgr_b.my_peer_id()) < 30
+            );
+            assert!(
+                route_cost.calculate_cost(peer_mgr_c.my_peer_id(), peer_mgr_a.my_peer_id()) > 10000
+            );
+            assert!(
+                route_cost.calculate_cost(peer_mgr_a.my_peer_id(), peer_mgr_c.my_peer_id()) > 10000
+            );
+            route_cost.end_update();
+            assert!(!route_cost.need_update());
         }
 
-        let global_digest = get_global_data(center_peer).read().await.digest.clone();
+        let global_digest = get_global_data(center_peer).digest.load();
         assert_eq!(digest.as_ref().unwrap(), &global_digest);
     }
 }

@@ -1,45 +1,48 @@
 use std::{
+    collections::BinaryHeap,
     hash::{Hash, Hasher},
     sync::Arc,
 };
 
+use crossbeam::atomic::AtomicCell;
 use dashmap::DashMap;
 use once_cell::sync::Lazy;
-use tokio::{sync::RwLock, task::JoinSet};
+use tokio::{task::JoinSet};
 
-use crate::common::PeerId;
+use crate::{common::PeerId, rpc::DirectConnectedPeerInfo};
 
 use super::{
     service::{GetGlobalPeerMapResponse, GlobalPeerMap, PeerCenterService, PeerInfoForGlobalMap},
     Digest, Error,
 };
 
-pub(crate) struct PeerCenterServerGlobalData {
-    pub global_peer_map: GlobalPeerMap,
-    pub digest: Digest,
-    pub update_time: std::time::Instant,
-    pub peer_update_time: DashMap<PeerId, std::time::Instant>,
+#[derive(Debug, Clone, PartialEq, PartialOrd, Ord, Eq, Hash)]
+pub(crate) struct SrcDstPeerPair {
+    src: PeerId,
+    dst: PeerId,
 }
 
-impl PeerCenterServerGlobalData {
-    fn new() -> Self {
-        PeerCenterServerGlobalData {
-            global_peer_map: GlobalPeerMap::new(),
-            digest: Digest::default(),
-            update_time: std::time::Instant::now(),
-            peer_update_time: DashMap::new(),
-        }
-    }
+#[derive(Debug, Clone)]
+pub(crate) struct PeerCenterInfoEntry {
+    info: DirectConnectedPeerInfo,
+    update_time: std::time::Instant,
+}
+
+#[derive(Default)]
+pub(crate) struct PeerCenterServerGlobalData {
+    pub(crate) global_peer_map: DashMap<SrcDstPeerPair, PeerCenterInfoEntry>,
+    pub(crate) peer_report_time: DashMap<PeerId, std::time::Instant>,
+    pub(crate) digest: AtomicCell<Digest>,
 }
 
 // a global unique instance for PeerCenterServer
-pub(crate) static GLOBAL_DATA: Lazy<DashMap<PeerId, Arc<RwLock<PeerCenterServerGlobalData>>>> =
+pub(crate) static GLOBAL_DATA: Lazy<DashMap<PeerId, Arc<PeerCenterServerGlobalData>>> =
     Lazy::new(DashMap::new);
 
-pub(crate) fn get_global_data(node_id: PeerId) -> Arc<RwLock<PeerCenterServerGlobalData>> {
+pub(crate) fn get_global_data(node_id: PeerId) -> Arc<PeerCenterServerGlobalData> {
     GLOBAL_DATA
         .entry(node_id)
-        .or_insert_with(|| Arc::new(RwLock::new(PeerCenterServerGlobalData::new())))
+        .or_insert_with(|| Arc::new(PeerCenterServerGlobalData::default()))
         .value()
         .clone()
 }
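
The server state above moves from one whole PeerInfoForGlobalMap per reporting peer to a flat DashMap keyed by a (src, dst) pair, so each report overwrites only its own edges and stale edges can expire individually. A tiny sketch of the edge-map idea, assuming the dashmap crate; Edge and the u32 ids are placeholders:

use dashmap::DashMap;

#[derive(Hash, PartialEq, Eq)]
struct Edge {
    src: u32,
    dst: u32,
}

fn main() {
    // value: latency in milliseconds for that directed edge
    let edges: DashMap<Edge, i32> = DashMap::new();
    edges.insert(Edge { src: 1, dst: 2 }, 15);
    // a later report for the same edge just overwrites it
    edges.insert(Edge { src: 1, dst: 2 }, 9);
    assert_eq!(*edges.get(&Edge { src: 1, dst: 2 }).unwrap(), 9);
}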

@@ -48,8 +51,6 @@ pub(crate) fn get_global_data(node_id: PeerId) -> Arc<RwLock<PeerCenterServerGlo
 pub struct PeerCenterServer {
     // every peer has its own server, so use per-struct dash map is ok.
     my_node_id: PeerId,
-    digest_map: DashMap<PeerId, Digest>,
-
     tasks: Arc<JoinSet<()>>,
 }
 

@@ -65,26 +66,32 @@ impl PeerCenterServer {
 
         PeerCenterServer {
             my_node_id,
-            digest_map: DashMap::new(),
 
             tasks: Arc::new(tasks),
         }
     }
 
     async fn clean_outdated_peer(my_node_id: PeerId) {
         let data = get_global_data(my_node_id);
-        let mut locked_data = data.write().await;
-        let now = std::time::Instant::now();
-        let mut to_remove = Vec::new();
-        for kv in locked_data.peer_update_time.iter() {
-            if now.duration_since(*kv.value()).as_secs() > 20 {
-                to_remove.push(*kv.key());
-            }
-        }
-        for peer_id in to_remove {
-            locked_data.global_peer_map.map.remove(&peer_id);
-            locked_data.peer_update_time.remove(&peer_id);
-        }
+        data.peer_report_time.retain(|_, v| {
+            std::time::Instant::now().duration_since(*v) < std::time::Duration::from_secs(180)
+        });
+        data.global_peer_map.retain(|_, v| {
+            std::time::Instant::now().duration_since(v.update_time)
+                < std::time::Duration::from_secs(180)
+        });
+    }
+
+    fn calc_global_digest(my_node_id: PeerId) -> Digest {
+        let data = get_global_data(my_node_id);
+        let mut hasher = std::collections::hash_map::DefaultHasher::new();
+        data.global_peer_map
+            .iter()
+            .map(|v| v.key().clone())
+            .collect::<BinaryHeap<_>>()
+            .into_sorted_vec()
+            .into_iter()
+            .for_each(|v| v.hash(&mut hasher));
+        hasher.finish()
     }
 }
 
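
calc_global_digest above hashes the sorted key set rather than the map itself, because DashMap iteration order is nondeterministic and every node must derive the same digest for the same mesh. A stand-alone sketch of that order-independent digest; digest_of is a hypothetical helper:

use std::collections::hash_map::DefaultHasher;
use std::collections::BinaryHeap;
use std::hash::{Hash, Hasher};

fn digest_of(keys: impl Iterator<Item = (u32, u32)>) -> u64 {
    let mut hasher = DefaultHasher::new();
    // BinaryHeap::into_sorted_vec fixes the order before hashing.
    keys.collect::<BinaryHeap<_>>()
        .into_sorted_vec()
        .into_iter()
        .for_each(|k| k.hash(&mut hasher));
    hasher.finish()
}

fn main() {
    let a = digest_of([(1, 2), (3, 4)].into_iter());
    let b = digest_of([(3, 4), (1, 2)].into_iter()); // same keys, other order
    assert_eq!(a, b);
}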

@@ -95,39 +102,28 @@ impl PeerCenterService for PeerCenterServer {
         self,
         _: tarpc::context::Context,
         my_peer_id: PeerId,
-        peers: Option<PeerInfoForGlobalMap>,
-        digest: Digest,
+        peers: PeerInfoForGlobalMap,
     ) -> Result<(), Error> {
-        tracing::trace!("receive report_peers");
+        tracing::debug!("receive report_peers");
 
         let data = get_global_data(self.my_node_id);
-        let mut locked_data = data.write().await;
-        locked_data
-            .peer_update_time
+        data.peer_report_time
             .insert(my_peer_id, std::time::Instant::now());
 
-        let old_digest = self.digest_map.get(&my_peer_id);
-        // if digest match, no need to update
-        if let Some(old_digest) = old_digest {
-            if *old_digest == digest {
-                return Ok(());
-            }
+        for (peer_id, peer_info) in peers.direct_peers {
+            let pair = SrcDstPeerPair {
+                src: my_peer_id,
+                dst: peer_id,
+            };
+            let entry = PeerCenterInfoEntry {
+                info: peer_info,
+                update_time: std::time::Instant::now(),
+            };
+            data.global_peer_map.insert(pair, entry);
         }
 
-        if peers.is_none() {
-            return Err(Error::DigestMismatch);
-        }
-
-        self.digest_map.insert(my_peer_id, digest);
-        locked_data
-            .global_peer_map
-            .map
-            .insert(my_peer_id, peers.unwrap());
-
-        let mut hasher = std::collections::hash_map::DefaultHasher::new();
-        locked_data.global_peer_map.map.hash(&mut hasher);
-        locked_data.digest = hasher.finish() as Digest;
-        locked_data.update_time = std::time::Instant::now();
+        data.digest
+            .store(PeerCenterServer::calc_global_digest(self.my_node_id));
 
         Ok(())
     }
@@ -138,15 +134,26 @@ impl PeerCenterService for PeerCenterServer {
         digest: Digest,
     ) -> Result<Option<GetGlobalPeerMapResponse>, Error> {
         let data = get_global_data(self.my_node_id);
-        if digest == data.read().await.digest {
+        if digest == data.digest.load() && digest != 0 {
             return Ok(None);
         }
 
-        let data = get_global_data(self.my_node_id);
-        let locked_data = data.read().await;
+        let mut global_peer_map = GlobalPeerMap::new();
+        for item in data.global_peer_map.iter() {
+            let (pair, entry) = item.pair();
+            global_peer_map
+                .map
+                .entry(pair.src)
+                .or_insert_with(|| PeerInfoForGlobalMap {
+                    direct_peers: Default::default(),
+                })
+                .direct_peers
+                .insert(pair.dst, entry.info.clone());
+        }
         Ok(Some(GetGlobalPeerMapResponse {
-            global_peer_map: locked_data.global_peer_map.clone(),
-            digest: locked_data.digest,
+            global_peer_map,
+            digest: data.digest.load(),
         }))
     }
 }

@@ -5,24 +5,6 @@ use crate::{common::PeerId, rpc::DirectConnectedPeerInfo};
 use super::{Digest, Error};
 use crate::rpc::PeerInfo;
 
-pub type LatencyLevel = crate::rpc::cli::LatencyLevel;
-
-impl LatencyLevel {
-    pub const fn from_latency_ms(lat_ms: u32) -> Self {
-        if lat_ms < 10 {
-            LatencyLevel::VeryLow
-        } else if lat_ms < 50 {
-            LatencyLevel::Low
-        } else if lat_ms < 100 {
-            LatencyLevel::Normal
-        } else if lat_ms < 200 {
-            LatencyLevel::High
-        } else {
-            LatencyLevel::VeryHigh
-        }
-    }
-}
-
 pub type PeerInfoForGlobalMap = crate::rpc::cli::PeerInfoForGlobalMap;
 
 impl From<Vec<PeerInfo>> for PeerInfoForGlobalMap {

@@ -34,10 +16,10 @@ impl From<Vec<PeerInfo>> for PeerInfoForGlobalMap {
                 .iter()
                 .map(|conn| conn.stats.as_ref().unwrap().latency_us)
                 .min()
-                .unwrap_or(0);
+                .unwrap_or(u32::MAX as u64);
 
             let dp_info = DirectConnectedPeerInfo {
-                latency_level: LatencyLevel::from_latency_ms(min_lat as u32 / 1000) as i32,
+                latency_ms: std::cmp::max(1, (min_lat as u32 / 1000) as i32),
             };
 
             // sort conn info so hash result is stable
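
The conversion above now keeps the raw minimum latency instead of bucketing it into a LatencyLevel, clamping to at least 1 ms so a live edge can never report 0 and look like missing data. A sketch of the same normalization; latency_ms is a hypothetical helper, and the hunk also falls back to u32::MAX when a peer has no connection stats:

fn latency_ms(min_lat_us: u64) -> i32 {
    // microseconds -> whole milliseconds, floored, but never below 1
    std::cmp::max(1, (min_lat_us as u32 / 1000) as i32)
}

fn main() {
    assert_eq!(latency_ms(250), 1); // a 0.25 ms link still reports 1 ms
    assert_eq!(latency_ms(15_000), 15);
}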

@@ -73,11 +55,7 @@ pub struct GetGlobalPeerMapResponse {
 pub trait PeerCenterService {
     // report center server which peer is directly connected to me
     // digest is a hash of current peer map, if digest not match, we need to transfer the whole map
-    async fn report_peers(
-        my_peer_id: PeerId,
-        peers: Option<PeerInfoForGlobalMap>,
-        digest: Digest,
-    ) -> Result<(), Error>;
+    async fn report_peers(my_peer_id: PeerId, peers: PeerInfoForGlobalMap) -> Result<(), Error>;
 
     async fn get_global_peer_map(digest: Digest)
         -> Result<Option<GetGlobalPeerMapResponse>, Error>;

@@ -683,16 +683,18 @@ impl PeerRouteServiceImpl {
             DefaultRouteCostCalculator::default(),
         );
 
-        let calc_locked = self.cost_calculator.lock().unwrap();
+        let mut calc_locked = self.cost_calculator.lock().unwrap();
         if calc_locked.is_none() {
            return;
         }
 
+        calc_locked.as_mut().unwrap().begin_update();
         self.route_table_with_cost.build_from_synced_info(
             self.my_peer_id,
             &self.synced_route_info,
-            &calc_locked.as_ref().unwrap(),
+            calc_locked.as_mut().unwrap(),
         );
+        calc_locked.as_mut().unwrap().end_update();
     }
 
     fn cost_calculator_need_update(&self) -> bool {

@@ -31,8 +31,11 @@ pub trait RouteInterface {
 
 pub type RouteInterfaceBox = Box<dyn RouteInterface + Send + Sync>;
 
-#[auto_impl::auto_impl(Box, Arc, &)]
+#[auto_impl::auto_impl(Box , &mut)]
 pub trait RouteCostCalculatorInterface: Send + Sync {
+    fn begin_update(&mut self) {}
+    fn end_update(&mut self) {}
+
     fn calculate_cost(&self, _src: PeerId, _dst: PeerId) -> i32 {
         1
     }
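
The trait above gains begin_update/end_update hooks with empty default bodies, which is why the auto_impl attribute now covers &mut instead of &. A minimal sketch of the intended call sequence, without the auto_impl macro and with u32 standing in for PeerId; the default return values here are assumptions:

trait RouteCostCalculatorInterface {
    fn begin_update(&mut self) {}
    fn end_update(&mut self) {}
    fn need_update(&self) -> bool {
        false
    }
    fn calculate_cost(&self, _src: u32, _dst: u32) -> i32 {
        1
    }
}

struct DefaultCalc;
impl RouteCostCalculatorInterface for DefaultCalc {}

fn main() {
    // Callers snapshot state before computing costs, then mark the
    // update done so need_update() can return to false.
    let mut calc = DefaultCalc;
    if calc.need_update() {
        calc.begin_update();
    }
    assert_eq!(calc.calculate_cost(1, 2), 1);
    calc.end_update();
}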