Improve reliability using event-based synchronization

Aram 🍐 2022-01-08 01:05:51 -05:00
parent 62b2641627
commit 51788c9557
12 changed files with 628 additions and 805 deletions
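This commit replaces the per-connection mpsc/oneshot channels and `AtomicBool` abort flags with a single publish/subscribe event bus: proxy tasks and virtual interfaces now communicate by broadcasting `Event`s over a shared `Bus`. In the hunks below, removed lines are prefixed with `-` and added lines with `+`; unchanged context lines carry no prefix.

The new `src/events.rs` is among the changed files, but its hunks are not shown here. As a rough sketch, inferred only from the call sites below (`bus.new_endpoint()`, `endpoint.send(..)`, `endpoint.recv()` inside `tokio::select!`), the API could be built on a `tokio::sync::broadcast` channel. Everything in this sketch beyond the names used in the diff is an assumption, not the actual implementation:

```rust
// Hypothetical src/events.rs, reconstructed from call sites in this commit.
use tokio::sync::broadcast;

use crate::config::PortForwardConfig;
use crate::virtual_iface::VirtualPort;

/// Events published by proxy tasks and virtual interfaces.
#[derive(Clone, Debug)]
pub enum Event {
    /// A local client connected on a TCP port forward.
    ClientConnectionInitiated(PortForwardConfig, VirtualPort),
    /// The local connection closed; tasks owning this port should stop.
    ClientConnectionDropped(VirtualPort),
    /// Data read from the local client, to be sent through the tunnel.
    LocalData(VirtualPort, Vec<u8>),
    /// Data arriving from the remote side, to be written back to the local client.
    RemoteData(VirtualPort, Vec<u8>),
}

/// Cloneable handle from which each task creates its own bus endpoint.
#[derive(Clone)]
pub struct Bus {
    tx: broadcast::Sender<Event>,
}

impl Bus {
    pub fn new() -> Self {
        let (tx, _) = broadcast::channel(1_000);
        Self { tx }
    }

    pub fn new_endpoint(&self) -> Endpoint {
        Endpoint {
            tx: self.tx.clone(),
            rx: self.tx.subscribe(),
        }
    }
}

pub struct Endpoint {
    tx: broadcast::Sender<Event>,
    rx: broadcast::Receiver<Event>,
}

impl Endpoint {
    /// Publishes an event to all endpoints on the bus.
    pub fn send(&self, event: Event) {
        // A broadcast send only fails when no receiver exists; safe to ignore here.
        let _ = self.tx.send(event);
    }

    /// Waits for the next event published on the bus.
    pub async fn recv(&mut self) -> Event {
        loop {
            match self.rx.recv().await {
                Ok(event) => return event,
                // This endpoint fell behind and missed events; keep going.
                Err(broadcast::error::RecvError::Lagged(_)) => continue,
                Err(broadcast::error::RecvError::Closed) => {
                    unreachable!("sender is still held by this endpoint")
                }
            }
        }
    }
}
```

One property of a plain broadcast channel is that an endpoint also receives the events it published itself, which is consistent with how the handlers below filter every event on their own `VirtualPort` and ignore the rest with a catch-all arm. The real implementation may refine this, for example by tagging events with a sender id.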


@@ -2,6 +2,7 @@ use std::net::IpAddr;
use std::sync::Arc;
use crate::config::{PortForwardConfig, PortProtocol};
+use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::wg::WireGuardTunnel;
@@ -16,6 +17,7 @@ pub async fn port_forward(
tcp_port_pool: TcpPortPool,
udp_port_pool: UdpPortPool,
wg: Arc<WireGuardTunnel>,
+bus: Bus,
) -> anyhow::Result<()> {
info!(
"Tunneling {} [{}]->[{}] (via [{}] as peer {})",
@@ -27,7 +29,7 @@ pub async fn port_forward(
);
match port_forward.protocol {
-PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, wg).await,
-PortProtocol::Udp => udp::udp_proxy_server(port_forward, udp_port_pool, wg).await,
+PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, bus).await,
+PortProtocol::Udp => udp::udp_proxy_server(port_forward, udp_port_pool, bus).await,
}
}
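With `bus: Bus` threaded through `port_forward`, the caller is expected to create one `Bus` and clone it into each forwarding task. A hypothetical call site follows; the startup code is not part of these hunks, and `config`, `source_peer_ip`, the port pools, and `wg` are assumed to be set up as before:

```rust
// Hypothetical startup code: one shared Bus, cloned into every forwarding task.
let bus = Bus::new();
for pf in config.port_forwards {
    let tcp_port_pool = tcp_port_pool.clone();
    let udp_port_pool = udp_port_pool.clone();
    let wg = wg.clone();
    let bus = bus.clone();
    tokio::spawn(async move {
        if let Err(e) =
            tunnel::port_forward(pf, source_peer_ip, tcp_port_pool, udp_port_pool, wg, bus).await
        {
            error!("Port forward failed: {}", e);
        }
    });
}
```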


@@ -1,17 +1,17 @@
use crate::config::{PortForwardConfig, PortProtocol};
-use crate::virtual_iface::tcp::TcpVirtualInterface;
-use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
-use crate::wg::WireGuardTunnel;
+use crate::virtual_iface::VirtualPort;
use anyhow::Context;
use std::collections::VecDeque;
-use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::net::{TcpListener, TcpStream};
use std::ops::Range;
+use std::time::Duration;
+use crate::events::{Bus, Event};
use rand::seq::SliceRandom;
use rand::thread_rng;
+use tokio::io::AsyncWriteExt;
const MAX_PACKET: usize = 65536;
const MIN_PORT: u16 = 1000;
@@ -22,14 +22,13 @@ const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
pub async fn tcp_proxy_server(
port_forward: PortForwardConfig,
port_pool: TcpPortPool,
-wg: Arc<WireGuardTunnel>,
+bus: Bus,
) -> anyhow::Result<()> {
let listener = TcpListener::bind(port_forward.source)
.await
.with_context(|| "Failed to listen on TCP proxy server")?;
loop {
-let wg = wg.clone();
let port_pool = port_pool.clone();
let (socket, peer_addr) = listener
.accept()
@@ -52,10 +51,10 @@ pub async fn tcp_proxy_server(
info!("[{}] Incoming connection from {}", virtual_port, peer_addr);
+let bus = bus.clone();
tokio::spawn(async move {
let port_pool = port_pool.clone();
-let result =
-handle_tcp_proxy_connection(socket, virtual_port, port_forward, wg.clone()).await;
+let result = handle_tcp_proxy_connection(socket, virtual_port, port_forward, bus).await;
if let Err(e) = result {
error!(
@@ -66,8 +65,7 @@ pub async fn tcp_proxy_server(
info!("[{}] Connection closed by client", virtual_port);
}
// Release port when connection drops
-wg.release_virtual_interface(VirtualPort(virtual_port, PortProtocol::Tcp));
+tokio::time::sleep(Duration::from_millis(100)).await; // Make sure the other tasks have time to process the event
port_pool.release(virtual_port).await;
});
}
@@ -75,72 +73,26 @@ pub async fn tcp_proxy_server(
/// Handles a new TCP connection with its assigned virtual port.
async fn handle_tcp_proxy_connection(
-socket: TcpStream,
-virtual_port: u16,
+mut socket: TcpStream,
+virtual_port: VirtualPort,
port_forward: PortForwardConfig,
-wg: Arc<WireGuardTunnel>,
+bus: Bus,
) -> anyhow::Result<()> {
-// Abort signal for stopping the Virtual Interface
-let abort = Arc::new(AtomicBool::new(false));
-// Signals that the Virtual Client is ready to send data
-let (virtual_client_ready_tx, virtual_client_ready_rx) = tokio::sync::oneshot::channel::<()>();
-// data_to_real_client_(tx/rx): This task reads the data from this mpsc channel to send back
-// to the real client.
-let (data_to_real_client_tx, mut data_to_real_client_rx) = tokio::sync::mpsc::channel(1_000);
-// data_to_real_server_(tx/rx): This task sends the data received from the real client to the
-// virtual interface (virtual server socket).
-let (data_to_virtual_server_tx, data_to_virtual_server_rx) = tokio::sync::mpsc::channel(1_000);
-// Spawn virtual interface
-{
-let abort = abort.clone();
-let virtual_interface = TcpVirtualInterface::new(
-virtual_port,
-port_forward,
-wg,
-abort.clone(),
-data_to_real_client_tx,
-data_to_virtual_server_rx,
-virtual_client_ready_tx,
-);
-tokio::spawn(async move {
-virtual_interface.poll_loop().await.unwrap_or_else(|e| {
-error!("Virtual interface poll loop failed unexpectedly: {}", e);
-abort.store(true, Ordering::Relaxed);
-})
-});
-}
-// Wait for virtual client to be ready.
-virtual_client_ready_rx
-.await
-.with_context(|| "Virtual client dropped before being ready.")?;
-trace!("[{}] Virtual client is ready to send data", virtual_port);
+let mut endpoint = bus.new_endpoint();
+endpoint.send(Event::ClientConnectionInitiated(port_forward, virtual_port));
+let mut buffer = Vec::with_capacity(MAX_PACKET);
loop {
tokio::select! {
readable_result = socket.readable() => {
match readable_result {
Ok(_) => {
-// Buffer for the individual TCP segment.
-let mut buffer = Vec::with_capacity(MAX_PACKET);
match socket.try_read_buf(&mut buffer) {
Ok(size) if size > 0 => {
-let data = &buffer[..size];
-debug!(
-"[{}] Read {} bytes of TCP data from real client",
-virtual_port, size
-);
-if let Err(e) = data_to_virtual_server_tx.send(data.to_vec()).await {
-error!(
-"[{}] Failed to dispatch data to virtual interface: {:?}",
-virtual_port, e
-);
-}
+let data = Vec::from(&buffer[..size]);
+endpoint.send(Event::LocalData(virtual_port, data));
+// Reset buffer
+buffer.clear();
}
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue;
@@ -163,43 +115,32 @@ async fn handle_tcp_proxy_connection(
}
}
}
-data_recv_result = data_to_real_client_rx.recv() => {
-match data_recv_result {
-Some(data) => match socket.try_write(&data) {
-Ok(size) => {
-debug!(
-"[{}] Wrote {} bytes of TCP data to real client",
-virtual_port, size
-);
-}
-Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
-if abort.load(Ordering::Relaxed) {
+event = endpoint.recv() => {
+match event {
+Event::ClientConnectionDropped(e_vp) if e_vp == virtual_port => {
+// This connection is supposed to be closed, stop the task.
break;
+}
+Event::RemoteData(e_vp, data) if e_vp == virtual_port => {
+// Have remote data to send to the local client
+let size = data.len();
+match socket.write(&data).await {
+Ok(size) => debug!("[{}] Sent {} bytes to local client", virtual_port, size),
+Err(e) => {
+error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, size, e);
+break;
-} else {
-continue;
}
}
-Err(e) => {
-error!(
-"[{}] Failed to write to client TCP socket: {:?}",
-virtual_port, e
-);
-}
-},
-None => {
-if abort.load(Ordering::Relaxed) {
-break;
-} else {
-continue;
-}
-},
}
+_ => {}
+}
}
}
}
trace!("[{}] TCP socket handler task terminated", virtual_port);
-abort.store(true, Ordering::Relaxed);
+// Notify other endpoints that this task has closed and no more data is to be sent to the local client
+endpoint.send(Event::ClientConnectionDropped(virtual_port));
Ok(())
}
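`handle_tcp_proxy_connection` now takes a `VirtualPort` instead of a bare `u16`. The type lives in `src/virtual_iface.rs`, outside the shown hunks; judging from `VirtualPort::new(..)`, `.num()`, the `e_vp == virtual_port` comparisons, and the `[{}]` log formatting, it is plausibly a small wrapper along these lines (a sketch, not the actual definition):

```rust
// Hypothetical VirtualPort wrapper, inferred from its uses in this diff.
use std::fmt;

use crate::config::PortProtocol;

// Assumes PortProtocol also derives Copy, Eq, Hash, and Debug.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct VirtualPort(u16, PortProtocol);

impl VirtualPort {
    pub fn new(port: u16, proto: PortProtocol) -> Self {
        VirtualPort(port, proto)
    }

    /// The bare port number, for indexing the pools' internal maps and queues.
    pub fn num(&self) -> u16 {
        self.0
    }
}

impl fmt::Display for VirtualPort {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Rendered inside the `[{}]` brackets used throughout the logs.
        write!(f, "{}:{:?}", self.0, self.1)
    }
}
```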
@@ -230,19 +171,19 @@ impl TcpPortPool {
}
/// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
-pub async fn next(&self) -> anyhow::Result<u16> {
+pub async fn next(&self) -> anyhow::Result<VirtualPort> {
let mut inner = self.inner.write().await;
let port = inner
.queue
.pop_front()
.with_context(|| "TCP virtual port pool is exhausted")?;
-Ok(port)
+Ok(VirtualPort::new(port, PortProtocol::Tcp))
}
/// Releases a port back into the pool.
-pub async fn release(&self, port: u16) {
+pub async fn release(&self, port: VirtualPort) {
let mut inner = self.inner.write().await;
-inner.queue.push_back(port);
+inner.queue.push_back(port.num());
}
}
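For context on the pool these methods belong to: the `rand::seq::SliceRandom` and `thread_rng` imports kept at the top of this file suggest the queue is seeded with the whole port range in shuffled order, so virtual ports are handed out randomly but without repeats until released. A plausible constructor, with `TcpPortPoolInner` as a hypothetical name for the `inner` type (the real `new()` is outside the shown hunks):

```rust
// Assumed TcpPortPool construction: shuffle PORT_RANGE once, then serve FIFO.
use std::collections::VecDeque;
use std::sync::Arc;

use rand::seq::SliceRandom;
use rand::thread_rng;

impl TcpPortPool {
    pub fn new() -> Self {
        let mut ports: Vec<u16> = PORT_RANGE.collect();
        ports.shuffle(&mut thread_rng());
        Self {
            inner: Arc::new(tokio::sync::RwLock::new(TcpPortPoolInner {
                queue: ports.into_iter().collect::<VecDeque<u16>>(),
            })),
        }
    }
}
```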


@@ -5,6 +5,7 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Instant;
+use crate::events::{Bus, Event};
use anyhow::Context;
use priority_queue::double_priority_queue::DoublePriorityQueue;
use priority_queue::priority_queue::PriorityQueue;
@@ -30,61 +31,24 @@ const UDP_TIMEOUT_SECONDS: u64 = 60;
/// TODO: Make this configurable by the CLI
const PORTS_PER_IP: usize = 100;
/// Starts the server that listens on UDP datagrams.
pub async fn udp_proxy_server(
port_forward: PortForwardConfig,
port_pool: UdpPortPool,
-wg: Arc<WireGuardTunnel>,
+bus: Bus,
) -> anyhow::Result<()> {
-// Abort signal
-let abort = Arc::new(AtomicBool::new(false));
-// data_to_real_client_(tx/rx): This task reads the data from this mpsc channel to send back
-// to the real client.
-let (data_to_real_client_tx, mut data_to_real_client_rx) =
-tokio::sync::mpsc::channel::<(VirtualPort, Vec<u8>)>(1_000);
-// data_to_real_server_(tx/rx): This task sends the data received from the real client to the
-// virtual interface (virtual server socket).
-let (data_to_virtual_server_tx, data_to_virtual_server_rx) =
-tokio::sync::mpsc::channel::<(VirtualPort, Vec<u8>)>(1_000);
-{
-// Spawn virtual interface
-// Note: contrary to TCP, there is only one UDP virtual interface
-let virtual_interface = UdpVirtualInterface::new(
-port_forward,
-wg,
-data_to_real_client_tx,
-data_to_virtual_server_rx,
-);
-let abort = abort.clone();
-tokio::spawn(async move {
-virtual_interface.poll_loop().await.unwrap_or_else(|e| {
-error!("Virtual interface poll loop failed unexpectedly: {}", e);
-abort.store(true, Ordering::Relaxed);
-});
-});
-}
+let mut endpoint = bus.new_endpoint();
let socket = UdpSocket::bind(port_forward.source)
.await
.with_context(|| "Failed to bind on UDP proxy address")?;
let mut buffer = [0u8; MAX_PACKET];
loop {
-if abort.load(Ordering::Relaxed) {
-break;
-}
tokio::select! {
to_send_result = next_udp_datagram(&socket, &mut buffer, port_pool.clone()) => {
match to_send_result {
Ok(Some((port, data))) => {
-data_to_virtual_server_tx.send((port, data)).await.unwrap_or_else(|e| {
-error!(
-"Failed to dispatch data to UDP virtual interface: {:?}",
-e
-);
-});
+endpoint.send(Event::LocalData(port, data));
}
Ok(None) => {
continue;
@@ -98,9 +62,9 @@ pub async fn udp_proxy_server(
}
}
}
-data_recv_result = data_to_real_client_rx.recv() => {
-if let Some((port, data)) = data_recv_result {
-if let Some(peer_addr) = port_pool.get_peer_addr(port.0).await {
+event = endpoint.recv() => {
+if let Event::RemoteData(port, data) = event {
+if let Some(peer_addr) = port_pool.get_peer_addr(port).await {
if let Err(e) = socket.send_to(&data, peer_addr).await {
error!(
"[{}] Failed to send UDP datagram to real client ({}): {:?}",
@@ -109,7 +73,7 @@ pub async fn udp_proxy_server(
e,
);
}
-port_pool.update_last_transmit(port.0).await;
+port_pool.update_last_transmit(port).await;
}
}
}
@@ -141,14 +105,13 @@ async fn next_udp_datagram(
return Ok(None);
}
};
-let port = VirtualPort(port, PortProtocol::Udp);
debug!(
"[{}] Received datagram of {} bytes from {}",
port, size, peer_addr
);
-port_pool.update_last_transmit(port.0).await;
+port_pool.update_last_transmit(port).await;
let data = buffer[..size].to_vec();
Ok(Some((port, data)))
@@ -181,14 +144,14 @@ impl UdpPortPool {
}
/// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
-pub async fn next(&self, peer_addr: SocketAddr) -> anyhow::Result<u16> {
+pub async fn next(&self, peer_addr: SocketAddr) -> anyhow::Result<VirtualPort> {
// A port found to be reused. This is outside of the block because the read lock cannot be upgraded to a write lock.
let mut port_reuse: Option<u16> = None;
{
let inner = self.inner.read().await;
if let Some(port) = inner.port_by_peer_addr.get(&peer_addr) {
-return Ok(*port);
+return Ok(VirtualPort::new(*port, PortProtocol::Udp));
}
// Count how many ports are being used by the peer IP
@@ -240,26 +203,26 @@ impl UdpPortPool {
inner.port_by_peer_addr.insert(peer_addr, port);
inner.peer_addr_by_port.insert(port, peer_addr);
-Ok(port)
+Ok(VirtualPort::new(port, PortProtocol::Udp))
}
/// Notify that the given virtual port has received or transmitted a UDP datagram.
-pub async fn update_last_transmit(&self, port: u16) {
+pub async fn update_last_transmit(&self, port: VirtualPort) {
let mut inner = self.inner.write().await;
-if let Some(peer) = inner.peer_addr_by_port.get(&port).copied() {
+if let Some(peer) = inner.peer_addr_by_port.get(&port.num()).copied() {
let mut pq: &mut DoublePriorityQueue<u16, Instant> = inner
.peer_port_usage
.entry(peer.ip())
.or_insert_with(Default::default);
-pq.push(port, Instant::now());
+pq.push(port.num(), Instant::now());
}
let mut pq: &mut DoublePriorityQueue<u16, Instant> = &mut inner.port_usage;
-pq.push(port, Instant::now());
+pq.push(port.num(), Instant::now());
}
-pub async fn get_peer_addr(&self, port: u16) -> Option<SocketAddr> {
+pub async fn get_peer_addr(&self, port: VirtualPort) -> Option<SocketAddr> {
let inner = self.inner.read().await;
-inner.peer_addr_by_port.get(&port).copied()
+inner.peer_addr_by_port.get(&port.num()).copied()
}
}
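The `DoublePriorityQueue<u16, Instant>` entries maintained by `update_last_transmit` order ports by last activity from both ends, which is what makes reclaiming idle UDP ports cheap once a peer IP hits `PORTS_PER_IP`. An illustrative helper for the kind of check `next()` can perform (the actual reuse policy sits in the `port_reuse` logic elided from these hunks):

```rust
// Illustrative only: find the least-recently-used port, and reclaim it only
// if it has been idle longer than the UDP timeout. peek_min() returns the
// entry with the smallest priority, i.e. the oldest Instant.
use std::time::{Duration, Instant};

use priority_queue::double_priority_queue::DoublePriorityQueue;

fn oldest_reusable_port(pq: &DoublePriorityQueue<u16, Instant>) -> Option<u16> {
    pq.peek_min().and_then(|(port, last_transmit)| {
        (last_transmit.elapsed() >= Duration::from_secs(UDP_TIMEOUT_SECONDS)).then(|| *port)
    })
}
```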