Index ports with protocol in WG. Start writing UDP tunnel code with plans.

Aram 🍐 2021-10-19 01:55:04 -04:00
parent 703f261344
commit 5cec6d4943
10 changed files with 156 additions and 91 deletions


@@ -2,15 +2,17 @@ use std::net::IpAddr;
 use std::sync::Arc;
 
 use crate::config::{PortForwardConfig, PortProtocol};
-use crate::port_pool::PortPool;
+use crate::tunnel::tcp::TcpPortPool;
 use crate::wg::WireGuardTunnel;
 
-mod tcp;
+pub mod tcp;
+#[allow(unused)]
+pub mod udp;
 
 pub async fn port_forward(
     port_forward: PortForwardConfig,
     source_peer_ip: IpAddr,
-    port_pool: Arc<PortPool>,
+    tcp_port_pool: Arc<TcpPortPool>,
     wg: Arc<WireGuardTunnel>,
 ) -> anyhow::Result<()> {
     info!(
@@ -23,7 +25,7 @@ pub async fn port_forward(
     );
 
     match port_forward.protocol {
-        PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, port_pool, wg).await,
-        PortProtocol::Udp => Err(anyhow::anyhow!("UDP isn't supported just yet.")),
+        PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, wg).await,
+        PortProtocol::Udp => udp::udp_proxy_server(port_forward, /* udp_port_pool, */ wg).await,
     }
 }


@@ -1,19 +1,26 @@
-use crate::config::PortForwardConfig;
-use crate::port_pool::PortPool;
+use crate::config::{PortForwardConfig, PortProtocol};
 use crate::virtual_iface::tcp::TcpVirtualInterface;
-use crate::virtual_iface::VirtualInterfacePoll;
+use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
 use crate::wg::WireGuardTunnel;
 use anyhow::Context;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
 use tokio::net::{TcpListener, TcpStream};
+use std::ops::Range;
+use rand::seq::SliceRandom;
+use rand::thread_rng;
 
 const MAX_PACKET: usize = 65536;
+const MIN_PORT: u16 = 1000;
+const MAX_PORT: u16 = 60999;
+const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
 
 /// Starts the server that listens on TCP connections.
 pub async fn tcp_proxy_server(
     port_forward: PortForwardConfig,
-    port_pool: Arc<PortPool>,
+    port_pool: Arc<TcpPortPool>,
     wg: Arc<WireGuardTunnel>,
 ) -> anyhow::Result<()> {
     let listener = TcpListener::bind(port_forward.source)
@@ -59,7 +66,7 @@ pub async fn tcp_proxy_server(
             }
 
             // Release port when connection drops
-            wg.release_virtual_interface(virtual_port);
+            wg.release_virtual_interface(VirtualPort(virtual_port, PortProtocol::Tcp));
             port_pool.release(virtual_port);
         });
     }
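
Note on the commit title ("Index ports with protocol in WG"): release_virtual_interface now takes a VirtualPort instead of a bare u16, so the WireGuard tunnel can tell a TCP virtual port apart from a UDP one with the same number. The type itself is not shown in this excerpt; judging only by how it is constructed above, it is presumably something like the following tuple struct (a sketch, not the actual definition in src/virtual_iface):

// Hypothetical shape inferred from the call site above: (port number, protocol).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct VirtualPort(pub u16, pub PortProtocol);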
@@ -194,3 +201,56 @@ async fn handle_tcp_proxy_connection(
     abort.store(true, Ordering::Relaxed);
     Ok(())
 }
+
+/// A pool of virtual ports available for TCP connections.
+/// This structure is thread-safe and lock-free; you can use it safely in an `Arc`.
+pub struct TcpPortPool {
+    /// Remaining available ports.
+    inner: lockfree::queue::Queue<u16>,
+    /// Ports currently in use by a virtual interface.
+    taken: lockfree::set::Set<u16>,
+}
+
+impl Default for TcpPortPool {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl TcpPortPool {
+    /// Initializes a new pool of virtual ports.
+    pub fn new() -> Self {
+        let inner = lockfree::queue::Queue::default();
+        let mut ports: Vec<u16> = PORT_RANGE.collect();
+        ports.shuffle(&mut thread_rng());
+        ports.into_iter().for_each(|p| inner.push(p));
+        Self {
+            inner,
+            taken: lockfree::set::Set::new(),
+        }
+    }
+
+    /// Requests a free port from the pool. An error is returned if none is available (max capacity exhausted).
+    pub fn next(&self) -> anyhow::Result<u16> {
+        let port = self
+            .inner
+            .pop()
+            .with_context(|| "Virtual port pool is exhausted")?;
+        self.taken
+            .insert(port)
+            .ok()
+            .with_context(|| "Failed to mark port as taken")?;
+        Ok(port)
+    }
+
+    /// Releases a port back into the pool.
+    pub fn release(&self, port: u16) {
+        self.inner.push(port);
+        self.taken.remove(&port);
+    }
+
+    /// Whether the given port is in use by a virtual interface.
+    pub fn is_in_use(&self, port: u16) -> bool {
+        self.taken.contains(&port)
+    }
+}
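
For context, the new pool hands out ports from a pre-shuffled lock-free queue and tracks them in a lock-free set, so it can be shared through an Arc without a mutex. A minimal usage sketch (the example function below is illustrative, not part of the commit):

fn example(pool: &TcpPortPool) -> anyhow::Result<()> {
    let virtual_port = pool.next()?; // pops a random port from PORT_RANGE and marks it taken
    assert!(pool.is_in_use(virtual_port));
    // ... proxy the TCP connection on this virtual port ...
    pool.release(virtual_port); // pushes the port back and clears the taken flag
    Ok(())
}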

src/tunnel/udp.rs (new file, +44)

@@ -0,0 +1,44 @@
+use std::sync::Arc;
+
+use anyhow::Context;
+use tokio::net::UdpSocket;
+
+use crate::config::PortForwardConfig;
+use crate::wg::WireGuardTunnel;
+
+const MAX_PACKET: usize = 65536;
+
+/// How long to keep a UDP peer address assigned to its virtual port, in seconds.
+const UDP_TIMEOUT_SECONDS: u64 = 60;
+
+/// To prevent port flooding, we set a limit on the number of open ports per IP address.
+const PORTS_PER_IP: usize = 100;
+
+pub async fn udp_proxy_server(
+    port_forward: PortForwardConfig,
+    wg: Arc<WireGuardTunnel>,
+) -> anyhow::Result<()> {
+    let socket = UdpSocket::bind(port_forward.source)
+        .await
+        .with_context(|| "Failed to bind on UDP proxy address")?;
+
+    let mut buffer = [0u8; MAX_PACKET];
+    loop {
+        let (size, peer_addr) = socket
+            .recv_from(&mut buffer)
+            .await
+            .with_context(|| "Failed to accept incoming UDP datagram")?;
+
+        let _wg = wg.clone();
+        let _data = &buffer[..size].to_vec();
+        debug!("Received datagram of {} bytes from {}", size, peer_addr);
+
+        // Assign a 'virtual port': this is a unique port number used to route IP packets
+        // received from the WireGuard tunnel. It is the port number that the virtual client will
+        // listen on.
+        // Since UDP is connection-less, the port is assigned to the source SocketAddr for up to `UDP_TIMEOUT_SECONDS`;
+        // every datagram from that SocketAddr resets the timer. Each IP address is also limited to `PORTS_PER_IP`
+        // active ports; when the limit is reached, the least-recently-used port is reclaimed.
+        // TODO: UDP Port Pool
+    }
+}
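
The comment block above is the plan for the missing piece: a UDP port pool that assigns a virtual port per source SocketAddr, expires it after UDP_TIMEOUT_SECONDS of silence, and caps each IP at PORTS_PER_IP ports, evicting the least-recently-used one. Below is a rough, single-threaded sketch of what such a pool could look like, assuming a plain HashMap keyed by peer address; the name UdpPortPool, the field names, and the naive port allocation are all assumptions, not the design this commit ends up with.

// Hypothetical sketch of the planned "UDP Port Pool"; names and data structures
// here are assumptions, not the commit's eventual implementation.
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::time::{Duration, Instant};

const MIN_PORT: u16 = 1000;
const MAX_PORT: u16 = 60999;
const UDP_TIMEOUT_SECONDS: u64 = 60;
const PORTS_PER_IP: usize = 100;

struct UdpPortPool {
    /// Peer address -> (assigned virtual port, time of the last datagram).
    by_peer: HashMap<SocketAddr, (u16, Instant)>,
    /// Next candidate port (a real pool would recycle released ports).
    next_port: u16,
}

impl UdpPortPool {
    fn new() -> Self {
        Self { by_peer: HashMap::new(), next_port: MIN_PORT }
    }

    /// Returns the virtual port assigned to `peer`, assigning a new one if needed.
    fn port_for(&mut self, peer: SocketAddr) -> u16 {
        let now = Instant::now();
        let timeout = Duration::from_secs(UDP_TIMEOUT_SECONDS);

        // Expire assignments that have been silent for longer than the timeout.
        self.by_peer.retain(|_, (_, seen)| now.duration_since(*seen) < timeout);

        // Every datagram from a known peer refreshes its timer and keeps its port.
        if let Some((port, seen)) = self.by_peer.get_mut(&peer) {
            *seen = now;
            return *port;
        }

        // Enforce the per-IP cap by discarding that IP's least-recently-used port.
        let ip: IpAddr = peer.ip();
        let same_ip: Vec<SocketAddr> = self
            .by_peer
            .keys()
            .filter(|addr| addr.ip() == ip)
            .copied()
            .collect();
        if same_ip.len() >= PORTS_PER_IP {
            let lru = same_ip.iter().copied().min_by_key(|addr| self.by_peer[addr].1);
            if let Some(lru) = lru {
                self.by_peer.remove(&lru);
            }
        }

        // Hand out a fresh port (wrapping naively; a real pool would avoid collisions).
        let port = self.next_port;
        self.next_port = if port >= MAX_PORT { MIN_PORT } else { port + 1 };
        self.by_peer.insert(peer, (port, now));
        port
    }
}

fn main() {
    let mut pool = UdpPortPool::new();
    let peer: SocketAddr = "127.0.0.1:4000".parse().unwrap();
    let port = pool.port_for(peer);
    assert_eq!(port, pool.port_for(peer)); // the same peer keeps its virtual port
    println!("assigned virtual port {} to {}", port, peer);
}

In the proxy loop above, such a pool would replace the `// TODO: UDP Port Pool` step: look up (or assign) the virtual port for `peer_addr`, then forward the datagram through the WireGuard tunnel under that port. A real implementation would also need to be shareable across tasks, for example lock-free like TcpPortPool or wrapped in a mutex.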