introduce gui based on tauri (#52)
easytier-gui/src-tauri/src/launcher.rs · 214 additions · Normal file
@@ -0,0 +1,214 @@
use std::{
    collections::VecDeque,
    sync::{atomic::AtomicBool, Arc, RwLock},
};

use chrono::{DateTime, Local};
use easytier::{
    common::{
        config::{ConfigLoader, TomlConfigLoader},
        global_ctx::GlobalCtxEvent,
        stun::StunInfoCollectorTrait,
    },
    instance::instance::Instance,
    peers::rpc_service::PeerManagerRpcService,
    rpc::{
        cli::{PeerInfo, Route, StunInfo},
        peer::GetIpListResponse,
    },
};
use serde::{Deserialize, Serialize};
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct MyNodeInfo {
    pub virtual_ipv4: String,
    pub ips: GetIpListResponse,
    pub stun_info: StunInfo,
    pub listeners: Vec<String>,
    pub vpn_portal_cfg: Option<String>,
}

// Shared state: written by the background instance thread, read by the GUI
// through the launcher's getters.
#[derive(Default, Clone)]
struct EasyTierData {
    events: Arc<RwLock<VecDeque<(DateTime<Local>, GlobalCtxEvent)>>>,
    node_info: Arc<RwLock<MyNodeInfo>>,
    routes: Arc<RwLock<Vec<Route>>>,
    peers: Arc<RwLock<Vec<PeerInfo>>>,
}

pub struct EasyTierLauncher {
    instance_alive: Arc<AtomicBool>,
    stop_flag: Arc<AtomicBool>,
    thread_handle: Option<std::thread::JoinHandle<()>>,
    running_cfg: String,

    error_msg: Arc<RwLock<Option<String>>>,
    data: EasyTierData,
}
impl EasyTierLauncher {
    pub fn new() -> Self {
        let instance_alive = Arc::new(AtomicBool::new(false));
        Self {
            instance_alive,
            thread_handle: None,
            error_msg: Arc::new(RwLock::new(None)),
            running_cfg: String::new(),

            stop_flag: Arc::new(AtomicBool::new(false)),
            data: EasyTierData::default(),
        }
    }

    // Append the event to a ring buffer, keeping only the 100 most recent entries.
    async fn handle_easytier_event(event: GlobalCtxEvent, data: EasyTierData) {
        let mut events = data.events.write().unwrap();
        events.push_back((chrono::Local::now(), event));
        if events.len() > 100 {
            events.pop_front();
        }
    }
    async fn easytier_routine(
        cfg: TomlConfigLoader,
        stop_signal: Arc<tokio::sync::Notify>,
        data: EasyTierData,
    ) -> Result<(), anyhow::Error> {
        let mut instance = Instance::new(cfg);
        let peer_mgr = instance.get_peer_manager();

        // Subscribe to global context events
        let global_ctx = instance.get_global_ctx();
        let data_c = data.clone();
        tokio::spawn(async move {
            let mut receiver = global_ctx.subscribe();
            while let Ok(event) = receiver.recv().await {
                Self::handle_easytier_event(event, data_c.clone()).await;
            }
        });

        // update my node info
        let data_c = data.clone();
        let global_ctx_c = instance.get_global_ctx();
        let peer_mgr_c = peer_mgr.clone();
        let vpn_portal = instance.get_vpn_portal_inst();
        tokio::spawn(async move {
            loop {
                let node_info = MyNodeInfo {
                    virtual_ipv4: global_ctx_c
                        .get_ipv4()
                        .map(|x| x.to_string())
                        .unwrap_or_default(),
                    ips: global_ctx_c.get_ip_collector().collect_ip_addrs().await,
                    stun_info: global_ctx_c.get_stun_info_collector().get_stun_info(),
                    listeners: global_ctx_c
                        .get_running_listeners()
                        .iter()
                        .map(|x| x.to_string())
                        .collect(),
                    vpn_portal_cfg: Some(
                        vpn_portal
                            .lock()
                            .await
                            .dump_client_config(peer_mgr_c.clone())
                            .await,
                    ),
                };
                *data_c.node_info.write().unwrap() = node_info.clone();
                *data_c.routes.write().unwrap() = peer_mgr_c.list_routes().await;
                *data_c.peers.write().unwrap() = PeerManagerRpcService::new(peer_mgr_c.clone())
                    .list_peers()
                    .await;
                tokio::time::sleep(std::time::Duration::from_secs(1)).await;
            }
        });

        instance.run().await?;
        stop_signal.notified().await;
        Ok(())
    }
    pub fn start<F>(&mut self, cfg_generator: F)
    where
        F: FnOnce() -> Result<TomlConfigLoader, anyhow::Error> + Send + Sync,
    {
        let error_msg = self.error_msg.clone();
        let cfg = cfg_generator();
        if let Err(e) = cfg {
            error_msg.write().unwrap().replace(e.to_string());
            return;
        }

        self.running_cfg = cfg.as_ref().unwrap().dump();

        let stop_flag = self.stop_flag.clone();

        let instance_alive = self.instance_alive.clone();
        instance_alive.store(true, std::sync::atomic::Ordering::Relaxed);

        let data = self.data.clone();

        // Run the instance on a dedicated thread with its own tokio runtime,
        // so the GUI thread is never blocked.
        self.thread_handle = Some(std::thread::spawn(move || {
            let rt = tokio::runtime::Builder::new_multi_thread()
                .enable_all()
                .build()
                .unwrap();
            let stop_notifier = Arc::new(tokio::sync::Notify::new());

            // Poll the stop flag and translate it into an async notification.
            let stop_notifier_clone = stop_notifier.clone();
            rt.spawn(async move {
                while !stop_flag.load(std::sync::atomic::Ordering::Relaxed) {
                    tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                }
                stop_notifier_clone.notify_one();
            });

            let ret = rt.block_on(Self::easytier_routine(
                cfg.unwrap(),
                stop_notifier.clone(),
                data,
            ));
            if let Err(e) = ret {
                error_msg.write().unwrap().replace(e.to_string());
            }
            instance_alive.store(false, std::sync::atomic::Ordering::Relaxed);
        }));
    }
    pub fn error_msg(&self) -> Option<String> {
        self.error_msg.read().unwrap().clone()
    }

    pub fn running(&self) -> bool {
        self.instance_alive
            .load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn get_events(&self) -> Vec<(DateTime<Local>, GlobalCtxEvent)> {
        let events = self.data.events.read().unwrap();
        events.iter().cloned().collect()
    }

    pub fn get_node_info(&self) -> MyNodeInfo {
        self.data.node_info.read().unwrap().clone()
    }

    pub fn get_routes(&self) -> Vec<Route> {
        self.data.routes.read().unwrap().clone()
    }

    pub fn get_peers(&self) -> Vec<PeerInfo> {
        self.data.peers.read().unwrap().clone()
    }
}

impl Drop for EasyTierLauncher {
    fn drop(&mut self) {
        self.stop_flag
            .store(true, std::sync::atomic::Ordering::Relaxed);
        if let Some(handle) = self.thread_handle.take() {
            if let Err(e) = handle.join() {
                println!("Error when joining thread: {:?}", e);
            }
        }
    }
}
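Taken on its own, the launcher's lifecycle is: hand start() a config generator, poll the getters, and let Drop raise the stop flag. A minimal sketch of that flow under the API above (the default TomlConfigLoader is just a stand-in for a real config):

fn launcher_sketch() {
    let mut launcher = EasyTierLauncher::new();
    // start() never returns an error directly; failures from the generator
    // or from the running instance surface later through error_msg().
    launcher.start(|| Ok(easytier::common::config::TomlConfigLoader::default()));

    std::thread::sleep(std::time::Duration::from_secs(1));
    if let Some(e) = launcher.error_msg() {
        eprintln!("instance failed: {}", e);
    } else if launcher.running() {
        println!("alive, {} events buffered", launcher.get_events().len());
    }
    // Dropping the launcher sets the stop flag and joins the worker thread.
}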
easytier-gui/src-tauri/src/main.rs · 353 additions · Normal file
@@ -0,0 +1,353 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

mod launcher;

use std::{collections::BTreeMap, env::current_exe, process};

use anyhow::Context;
use chrono::{DateTime, Local};
use dashmap::DashMap;
use easytier::{
    common::{
        config::{ConfigLoader, NetworkIdentity, PeerConfig, TomlConfigLoader, VpnPortalConfig},
        global_ctx::GlobalCtxEvent,
    },
    rpc::{PeerInfo, Route},
    utils::{list_peer_route_pair, PeerRoutePair},
};
use launcher::{EasyTierLauncher, MyNodeInfo};
use serde::{Deserialize, Serialize};

use tauri::{
    CustomMenuItem, Manager, SystemTray, SystemTrayEvent, SystemTrayMenu, SystemTrayMenuItem,
    Window,
};
#[derive(Deserialize, Serialize, PartialEq, Debug)]
enum NetworkingMethod {
    PublicServer,
    Manual,
    Standalone,
}

impl Default for NetworkingMethod {
    fn default() -> Self {
        NetworkingMethod::PublicServer
    }
}
#[derive(Deserialize, Serialize, Debug, Default)]
struct NetworkConfig {
    instance_id: String,

    virtual_ipv4: String,
    network_name: String,
    network_secret: String,
    networking_method: NetworkingMethod,

    public_server_url: String,
    peer_urls: Vec<String>,

    proxy_cidrs: Vec<String>,

    enable_vpn_portal: bool,
    vpn_portal_listne_port: i32,
    vpn_portal_client_network_addr: String,
    vpn_portal_client_network_len: i32,

    advanced_settings: bool,

    listener_urls: Vec<String>,
    rpc_port: i32,
}
impl NetworkConfig {
    fn gen_config(&self) -> Result<TomlConfigLoader, anyhow::Error> {
        let cfg = TomlConfigLoader::default();
        cfg.set_id(
            self.instance_id
                .parse()
                .with_context(|| format!("failed to parse instance id: {}", self.instance_id))?,
        );
        cfg.set_inst_name(self.network_name.clone());
        cfg.set_network_identity(NetworkIdentity {
            network_name: self.network_name.clone(),
            network_secret: self.network_secret.clone(),
        });

        if !self.virtual_ipv4.is_empty() {
            cfg.set_ipv4(
                self.virtual_ipv4.parse().with_context(|| {
                    format!("failed to parse ipv4 address: {}", self.virtual_ipv4)
                })?,
            )
        }

        match self.networking_method {
            NetworkingMethod::PublicServer => {
                cfg.set_peers(vec![PeerConfig {
                    uri: self.public_server_url.parse().with_context(|| {
                        format!(
                            "failed to parse public server uri: {}",
                            self.public_server_url
                        )
                    })?,
                }]);
            }
            NetworkingMethod::Manual => {
                let mut peers = vec![];
                for peer_url in self.peer_urls.iter() {
                    if peer_url.is_empty() {
                        continue;
                    }
                    peers.push(PeerConfig {
                        uri: peer_url
                            .parse()
                            .with_context(|| format!("failed to parse peer uri: {}", peer_url))?,
                    });
                }

                cfg.set_peers(peers);
            }
            NetworkingMethod::Standalone => {}
        }

        let mut listener_urls = vec![];
        for listener_url in self.listener_urls.iter() {
            if listener_url.is_empty() {
                continue;
            }
            listener_urls.push(
                listener_url
                    .parse()
                    .with_context(|| format!("failed to parse listener uri: {}", listener_url))?,
            );
        }
        cfg.set_listeners(listener_urls);

        for n in self.proxy_cidrs.iter() {
            cfg.add_proxy_cidr(
                n.parse()
                    .with_context(|| format!("failed to parse proxy network: {}", n))?,
            );
        }

        cfg.set_rpc_portal(
            format!("127.0.0.1:{}", self.rpc_port)
                .parse()
                .with_context(|| format!("failed to parse rpc portal port: {}", self.rpc_port))?,
        );

        if self.enable_vpn_portal {
            let cidr = format!(
                "{}/{}",
                self.vpn_portal_client_network_addr, self.vpn_portal_client_network_len
            );
            cfg.set_vpn_portal_config(VpnPortalConfig {
                client_cidr: cidr
                    .parse()
                    .with_context(|| format!("failed to parse vpn portal client cidr: {}", cidr))?,
                wireguard_listen: format!("0.0.0.0:{}", self.vpn_portal_listne_port)
                    .parse()
                    .with_context(|| {
                        format!(
                            "failed to parse vpn portal wireguard listen port: {}",
                            self.vpn_portal_listne_port
                        )
                    })?,
            });
        }

        Ok(cfg)
    }
}
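// Illustrative only: a hypothetical JSON payload from the GUI frontend that
// gen_config() above would accept. Field names must match the struct exactly
// (the serde derive uses the field identifiers as-is, including the
// misspelled `vpn_portal_listne_port`), and every field must be present,
// since no #[serde(default)] is declared. All values below are placeholders.
//
// {
//   "instance_id": "f5b5e6a0-7c3f-4f9e-9f2a-0d5f0a6b1c2d",
//   "virtual_ipv4": "",
//   "network_name": "default",
//   "network_secret": "",
//   "networking_method": "PublicServer",
//   "public_server_url": "tcp://public.server.example:11010",
//   "peer_urls": [],
//   "proxy_cidrs": [],
//   "enable_vpn_portal": false,
//   "vpn_portal_listne_port": 22022,
//   "vpn_portal_client_network_addr": "",
//   "vpn_portal_client_network_len": 24,
//   "advanced_settings": false,
//   "listener_urls": ["tcp://0.0.0.0:11010"],
//   "rpc_port": 15888
// }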
#[derive(Deserialize, Serialize, Debug)]
struct NetworkInstanceRunningInfo {
    my_node_info: MyNodeInfo,
    events: Vec<(DateTime<Local>, GlobalCtxEvent)>,
    node_info: MyNodeInfo,
    routes: Vec<Route>,
    peers: Vec<PeerInfo>,
    peer_route_pairs: Vec<PeerRoutePair>,
    running: bool,
    error_msg: Option<String>,
}

struct NetworkInstance {
    config: TomlConfigLoader,
    launcher: Option<EasyTierLauncher>,
}
impl NetworkInstance {
    fn new(cfg: NetworkConfig) -> Result<Self, anyhow::Error> {
        Ok(Self {
            config: cfg.gen_config()?,
            launcher: None,
        })
    }

    fn is_easytier_running(&self) -> bool {
        self.launcher.is_some() && self.launcher.as_ref().unwrap().running()
    }

    fn get_running_info(&self) -> Option<NetworkInstanceRunningInfo> {
        if self.launcher.is_none() {
            return None;
        }

        let launcher = self.launcher.as_ref().unwrap();

        let peers = launcher.get_peers();
        let routes = launcher.get_routes();
        let peer_route_pairs = list_peer_route_pair(peers.clone(), routes.clone());

        Some(NetworkInstanceRunningInfo {
            my_node_info: launcher.get_node_info(),
            events: launcher.get_events(),
            node_info: launcher.get_node_info(),
            routes,
            peers,
            peer_route_pairs,
            running: launcher.running(),
            error_msg: launcher.error_msg(),
        })
    }

    fn start(&mut self) -> Result<(), anyhow::Error> {
        if self.is_easytier_running() {
            return Ok(());
        }

        let mut launcher = EasyTierLauncher::new();
        launcher.start(|| Ok(self.config.clone()));

        self.launcher = Some(launcher);
        Ok(())
    }
}
static INSTANCE_MAP: once_cell::sync::Lazy<DashMap<String, NetworkInstance>> =
    once_cell::sync::Lazy::new(DashMap::new);

// Learn more about Tauri commands at https://tauri.app/v1/guides/features/command
#[tauri::command]
fn parse_network_config(cfg: &str) -> Result<String, String> {
    let cfg: NetworkConfig = serde_json::from_str(cfg).map_err(|e| e.to_string())?;
    let toml = cfg.gen_config().map_err(|e| e.to_string())?;
    Ok(toml.dump())
}
#[tauri::command]
fn run_network_instance(cfg: &str) -> Result<String, String> {
    let cfg: NetworkConfig = serde_json::from_str(cfg).map_err(|e| e.to_string())?;
    if INSTANCE_MAP.contains_key(&cfg.instance_id) {
        return Err("instance already exists".to_string());
    }
    let instance_id = cfg.instance_id.clone();

    let mut instance = NetworkInstance::new(cfg).map_err(|e| e.to_string())?;
    instance.start().map_err(|e| e.to_string())?;

    println!("instance {} started", instance_id);
    INSTANCE_MAP.insert(instance_id, instance);
    Ok("".to_string())
}

#[tauri::command]
fn retain_network_instance(instance_ids: &str) -> Result<(), String> {
    let instance_ids: Vec<String> =
        serde_json::from_str(instance_ids).map_err(|e| e.to_string())?;
    INSTANCE_MAP.retain(|k, _| instance_ids.contains(k));
    println!(
        "instance {:?} retained",
        INSTANCE_MAP
            .iter()
            .map(|item| item.key().clone())
            .collect::<Vec<_>>()
    );
    Ok(())
}

#[tauri::command]
fn collect_network_infos() -> Result<String, String> {
    let mut ret = BTreeMap::new();
    for instance in INSTANCE_MAP.iter() {
        if let Some(info) = instance.get_running_info() {
            ret.insert(instance.key().clone(), info);
        }
    }
    serde_json::to_string(&ret).map_err(|e| e.to_string())
}
fn toggle_window_visibility(window: &Window) {
    if window.is_visible().unwrap() {
        window.hide().unwrap();
    } else {
        window.show().unwrap();
    }
}

// If not already elevated, re-launch the current executable with an elevation
// prompt and report false so the unprivileged copy can exit.
fn check_sudo() -> bool {
    let is_elevated = privilege::user::privileged();
    if !is_elevated {
        let Ok(my_exe) = current_exe() else {
            return true;
        };
        let mut elevated_cmd = privilege::runas::Command::new(my_exe);
        let _ = elevated_cmd.force_prompt(true).gui(true).run();
    }
    is_elevated
}
fn main() {
    if !check_sudo() {
        process::exit(0);
    }
    let quit = CustomMenuItem::new("quit".to_string(), "Quit");
    let hide = CustomMenuItem::new("hide".to_string(), "Show / Hide");
    let tray_menu = SystemTrayMenu::new()
        .add_item(quit)
        .add_native_item(SystemTrayMenuItem::Separator)
        .add_item(hide);

    tauri::Builder::default()
        .invoke_handler(tauri::generate_handler![
            parse_network_config,
            run_network_instance,
            retain_network_instance,
            collect_network_infos
        ])
        .system_tray(SystemTray::new().with_menu(tray_menu))
        .on_system_tray_event(|app, event| match event {
            SystemTrayEvent::DoubleClick {
                position: _,
                size: _,
                ..
            } => {
                let window = app.get_window("main").unwrap();
                toggle_window_visibility(&window);
            }
            SystemTrayEvent::MenuItemClick { id, .. } => match id.as_str() {
                "quit" => {
                    std::process::exit(0);
                }
                "hide" => {
                    let window = app.get_window("main").unwrap();
                    toggle_window_visibility(&window);
                }
                _ => {}
            },
            _ => {}
        })
        .on_window_event(|event| match event.event() {
            tauri::WindowEvent::CloseRequested { api, .. } => {
                // Hide to the tray instead of exiting when the window closes.
                event.window().hide().unwrap();
                api.prevent_close();
            }
            _ => {}
        })
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
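To make the command surface concrete, here is a hedged sketch that drives the four handlers directly as plain functions, bypassing Tauri's IPC (in the real app the frontend builds the JSON and calls them via `invoke`). The UUID, network name, and port are made-up placeholders, and actually starting an instance needs the same privileges as the GUI:

fn command_flow_sketch() {
    // Build a config the way the frontend would, then serialize it to the
    // JSON string the commands expect.
    let cfg = NetworkConfig {
        instance_id: "f5b5e6a0-7c3f-4f9e-9f2a-0d5f0a6b1c2d".to_string(), // placeholder UUID
        network_name: "demo".to_string(),
        networking_method: NetworkingMethod::Standalone,
        rpc_port: 15888,
        ..Default::default()
    };
    let cfg_json = serde_json::to_string(&cfg).unwrap();

    // Preview the generated TOML without starting anything.
    println!("{}", parse_network_config(&cfg_json).expect("valid config"));

    // Start the instance, let the background thread collect some state,
    // dump it, then drop the instance by retaining an empty id list.
    run_network_instance(&cfg_json).expect("instance should start");
    std::thread::sleep(std::time::Duration::from_secs(2));
    println!("{}", collect_network_infos().unwrap());
    retain_network_instance("[]").unwrap();
}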