Compare commits

..

No commits in common. "master" and "v0.1.5" have entirely different histories.

26 changed files with 1365 additions and 3517 deletions

View file

@ -1,4 +0,0 @@
[env]
# Each interface needs 1 IP allocated to the WireGuard peer IP.
# "8" = 7 tunnels per protocol.
SMOLTCP_IFACE_MAX_ADDR_COUNT = "8"

View file

@ -1,6 +1,6 @@
#!/bin/sh
# brew install asciidoctor
brew install asciidoctor
# brew install openssl@1.1
# cp /usr/local/opt/openssl@1.1/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/
brew install openssl@1.1
cp /usr/local/opt/openssl@1.1/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/

View file

@ -1,10 +0,0 @@
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
rebase-strategy: "disabled"

BIN
.github/onetun.png vendored

Binary file not shown.

Before

Width:  |  Height:  |  Size: 136 KiB

View file

@ -10,7 +10,7 @@ jobs:
matrix:
rust:
- stable
- 1.80.0
- 1.55.0
steps:
- name: Checkout sources
uses: actions/checkout@v2
@ -26,12 +26,6 @@ jobs:
with:
command: check
- name: Run cargo check without default features
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features
test:
name: Test Suite
runs-on: ubuntu-latest
@ -39,7 +33,7 @@ jobs:
matrix:
rust:
- stable
- 1.80.0
- 1.55.0
steps:
- name: Checkout sources
uses: actions/checkout@v2

View file

@ -61,7 +61,7 @@ jobs:
run: echo "${{ env.VERSION }}" > artifacts/release-version
- name: Upload artifacts
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v1
with:
name: artifacts
path: artifacts
@ -75,28 +75,20 @@ jobs:
RUST_BACKTRACE: 1
strategy:
matrix:
build: [ linux-amd64, linux-aarch64, macos-aarch64, windows ]
build: [ linux-amd64, macos-intel, windows ]
include:
- build: linux-amd64
os: ubuntu-latest
os: ubuntu-18.04
rust: stable
target: x86_64-unknown-linux-musl
cross: true
- build: linux-aarch64
os: ubuntu-latest
rust: stable
target: aarch64-unknown-linux-musl
cross: true
- build: macos-aarch64
- build: macos-intel
os: macos-latest
rust: stable
target: aarch64-apple-darwin
cross: false
target: x86_64-apple-darwin
- build: windows
os: windows-2019
rust: stable
target: x86_64-pc-windows-msvc
cross: false
steps:
- name: Checkout repository
@ -105,7 +97,7 @@ jobs:
fetch-depth: 1
- name: Install packages (Ubuntu)
if: matrix.os == 'ubuntu-latest'
if: matrix.os == 'ubuntu-18.04'
run: |
.github/ci/ubuntu-install-packages
- name: Install packages (macOS)
@ -121,7 +113,7 @@ jobs:
target: ${{ matrix.target }}
- name: Get release download URL
uses: actions/download-artifact@v4
uses: actions/download-artifact@v1
with:
name: artifacts
path: artifacts
@ -134,24 +126,17 @@ jobs:
echo "release upload url: $release_upload_url"
- name: Build onetun binary
shell: bash
run: |
if [ "${{ matrix.cross }}" = "true" ]; then
cargo install cross
cross build --release --target ${{ matrix.target }}
else
cargo build --release --target ${{ matrix.target }}
fi
run: cargo build --release
- name: Prepare onetun binary
shell: bash
run: |
mkdir -p ci/assets
if [ "${{ matrix.build }}" = "windows" ]; then
cp "target/${{ matrix.target }}/release/onetun.exe" "ci/assets/onetun.exe"
cp "target/release/onetun.exe" "ci/assets/onetun.exe"
echo "ASSET=onetun.exe" >> $GITHUB_ENV
else
cp "target/${{ matrix.target }}/release/onetun" "ci/assets/onetun-${{ matrix.build }}"
cp "target/release/onetun" "ci/assets/onetun-${{ matrix.build }}"
echo "ASSET=onetun-${{ matrix.build }}" >> $GITHUB_ENV
fi

3
.gitignore vendored
View file

@ -1,6 +1,3 @@
/target
/.idea
.envrc
*.log
*.pcap
.DS_Store

1287
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,48 +1,17 @@
[package]
name = "onetun"
version = "0.3.10"
edition = "2021"
license = "MIT"
description = "A cross-platform, user-space WireGuard port-forwarder that requires no system network configurations."
authors = ["Aram Peres <aram.peres@gmail.com>"]
repository = "https://github.com/aramperes/onetun"
version = "0.1.5"
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
# Required dependencies (bin and lib)
boringtun = { version = "0.6.0", default-features = false }
boringtun = { git = "https://github.com/cloudflare/boringtun", branch = "master" }
clap = { version = "2.33", default-features = false, features = ["suggestions"] }
log = "0.4"
pretty_env_logger = "0.3"
anyhow = "1"
tokio = { version = "1", features = [ "rt", "sync", "io-util", "net", "time", "fs", "macros" ] }
futures = "0.3"
rand = "0.8"
nom = "7"
async-trait = "0.1"
priority-queue = "2.1"
smoltcp = { version = "0.12", default-features = false, features = [
"std",
"log",
"medium-ip",
"proto-ipv4",
"proto-ipv6",
"socket-udp",
"socket-tcp",
] }
bytes = "1"
base64 = "0.13"
# forward boringtuns tracing events to log
tracing = { version = "0.1", default-features = false, features = ["log"] }
# bin-only dependencies
clap = { version = "4.4.11", default-features = false, features = ["suggestions", "std", "env", "help", "wrap_help"], optional = true }
pretty_env_logger = { version = "0.5", optional = true }
async-recursion = "1.0"
[features]
pcap = []
default = [ "bin" ]
bin = [ "clap", "pretty_env_logger", "pcap", "tokio/rt-multi-thread" ]
[lib]
smoltcp = { git = "https://github.com/smoltcp-rs/smoltcp", branch = "master" }
tokio = { version = "1", features = ["full"] }
lockfree = "0.5.1"
futures = "0.3.17"

View file

@ -1,11 +1,10 @@
FROM rust:1.82.0 as cargo-build
FROM rust:1.55 as cargo-build
WORKDIR /usr/src/onetun
COPY Cargo.toml Cargo.toml
# Placeholder to download dependencies and cache them using layering
RUN mkdir src/
RUN touch src/lib.rs
RUN echo "fn main() {println!(\"if you see this, the build broke\")}" > src/main.rs
RUN cargo build --release
RUN rm -f target/x86_64-unknown-linux-musl/release/deps/myapp*
@ -15,9 +14,8 @@ COPY . .
RUN cargo build --release
FROM debian:11-slim
RUN apt-get update \
&& apt-get install dumb-init -y \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update
RUN apt-get install dumb-init -y
COPY --from=cargo-build /usr/src/onetun/target/release/onetun /usr/local/bin/onetun

View file

@ -1,6 +1,6 @@
MIT License
Copyright (c) 2025 Aram Peres
Copyright (c) 2021 Aram Peres
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

267
README.md
View file

@ -1,67 +1,27 @@
<img align="right" alt="onetun" width="150" src=".github/onetun.png">
# onetun
A cross-platform, user-space WireGuard port-forwarder that requires **no root-access or system network configurations**.
A cross-platform, user-space WireGuard port-forwarder that requires no system network configurations.
[![crates.io](https://img.shields.io/crates/v/onetun.svg)](https://crates.io/crates/onetun)
[![MIT licensed](https://img.shields.io/crates/l/onetun.svg)](./LICENSE)
[![Build status](https://github.com/aramperes/onetun/actions/workflows/build.yml/badge.svg)](https://github.com/aramperes/onetun/actions)
[![Latest Release](https://img.shields.io/github/v/tag/aramperes/onetun?label=release)](https://github.com/aramperes/onetun/releases/latest)
## Use-case
Access TCP or UDP services running on your WireGuard network, from devices that don't have WireGuard installed.
For example,
- Personal or shared computers where you can't install WireGuard (root)
- IoT and mobile devices
- Root-less containers
## Download
onetun is available to install from [crates.io](https://crates.io/crates/onetun) with Rust ≥1.80.0:
```shell
cargo install onetun
```
You can also download the binary for Windows, macOS (Apple Silicon), and Linux (amd64, arm64) from
the [Releases](https://github.com/aramperes/onetun/releases) page.
You can also run onetun using [Docker](https://hub.docker.com/r/aramperes/onetun):
```shell
docker run --rm --name onetun --user 1000 -p 8080:8080 aramperes/onetun \
0.0.0.0:8080:192.168.4.2:8080 [...options...]
```
You can also build onetun locally, using Rust ≥1.80.0:
```shell
git clone https://github.com/aramperes/onetun && cd onetun
cargo build --release
./target/release/onetun
```
## Usage
**onetun** opens a TCP or UDP port on your local system, from which traffic is forwarded to a port on a peer in your
**onetun** opens a TCP port on your local system, from which traffic is forwarded to a TCP port on a peer in your
WireGuard network. It requires no changes to your operating system's network interfaces: you don't need to have `root`
access, or install any WireGuard tool on your local system for it to work.
The only prerequisite is to register a peer IP and public key on the remote WireGuard endpoint; those are necessary for
the WireGuard endpoint to trust the onetun peer and for packets to be routed.
```shell
onetun [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...] [...] \
--endpoint-addr <public WireGuard endpoint address> \
--endpoint-public-key <the public key of the peer on the endpoint> \
--private-key <private key assigned to onetun> \
--source-peer-ip <IP assigned to onetun> \
--keep-alive <optional persistent keep-alive in seconds> \
--log <optional log level, defaults to "info">
```
./onetun <SOURCE_ADDR> <DESTINATION_ADDR> \
--endpoint-addr <public WireGuard endpoint address> \
--endpoint-public-key <the public key of the peer on the endpoint> \
--private-key <private key assigned to onetun> \
--source-peer-ip <IP assigned to onetun> \
--keep-alive <optional persistent keep-alive in seconds> \
--log <optional log level, defaults to "info"
```
> Note: you can use environment variables for all of these flags. Use `onetun --help` for details.
@ -70,7 +30,7 @@ onetun [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...] [...] \
Suppose your WireGuard endpoint has the following configuration, and is accessible from `140.30.3.182:51820`:
```shell
```
# /etc/wireguard/wg0.conf
[Interface]
@ -93,7 +53,7 @@ We want to access a web server on the friendly peer (`192.168.4.2`) on port `808
local port, say `127.0.0.1:8080`, that will tunnel through WireGuard to reach the peer web server:
```shell
onetun 127.0.0.1:8080:192.168.4.2:8080 \
./onetun 127.0.0.1:8080 192.168.4.2:8080 \
--endpoint-addr 140.30.3.182:51820 \
--endpoint-public-key 'PUB_****************************************' \
--private-key 'PRIV_BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' \
@ -103,129 +63,58 @@ onetun 127.0.0.1:8080:192.168.4.2:8080 \
You'll then see this log:
```shell
INFO onetun > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
INFO onetun > Tunnelling [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Which means you can now access the port locally!
```shell
curl 127.0.0.1:8080
```
$ curl 127.0.0.1:8080
Hello world!
```
### Multiple tunnels in parallel
## Download
**onetun** supports running multiple tunnels in parallel. For example:
Normally I would publish `onetun` to crates.io. However, it depends on some features
in [smoltcp](https://github.com/smoltcp-rs/smoltcp) and
[boringtun](https://github.com/cloudflare/boringtun) that haven't been published yet, so I'm forced to use their Git
repos as dependencies for now.
In the meantime, you can download the binary for Windows, macOS (Intel), and Linux (amd64) from
the [Releases](https://github.com/aramperes/onetun/releases) page.
You can also run onetun using [Docker](https://hub.docker.com/r/aramperes/onetun):
```shell
onetun 127.0.0.1:8080:192.168.4.2:8080 127.0.0.1:8081:192.168.4.4:8081
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8081]->[192.168.4.4:8081] (via [140.30.3.182:51820] as peer 192.168.4.3)
docker run --rm --name onetun --user 1000 -p 8080:8080 aramperes/onetun \
0.0.0.0:8080 192.168.4.2:8080 [...options...]
```
... would open TCP ports 8080 and 8081 locally, which forward to their respective ports on the different peers.
#### Maximum number of tunnels
`smoltcp` imposes a compile-time limit on the number of IP addresses assigned to an interface. **onetun** increases
the default value to support most use-cases. In effect, the default limit on the number of **onetun** peers
is **7 per protocol** (TCP and UDP).
Should you need more unique IP addresses to forward ports to, you can increase the limit in `.cargo/config.toml` and recompile **onetun**.
### UDP Support
**onetun** supports UDP forwarding. You can add `:UDP` at the end of the port-forward configuration, or `UDP,TCP` to support
both protocols on the same port (note that this opens 2 separate tunnels, just on the same port)
You can also build onetun locally, using Rust:
```shell
onetun 127.0.0.1:8080:192.168.4.2:8080:UDP
INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
onetun 127.0.0.1:8080:192.168.4.2:8080:UDP,TCP
INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Note: UDP support is totally experimental. You should read the UDP portion of the **Architecture** section before using
it in any production capacity.
### IPv6 Support
**onetun** supports both IPv4 and IPv6. In fact, you can use onetun to forward some IP version to another, e.g. 6-to-4:
```shell
onetun [::1]:8080:192.168.4.2:8080
INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Note that each tunnel can only support one "source" IP version and one "destination" IP version. If you want to support
both IPv4 and IPv6 on the same port, you should create a second port-forward:
```shell
onetun [::1]:8080:192.168.4.2:8080 127.0.0.1:8080:192.168.4.2:8080
INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
### Packet Capture
For debugging purposes, you can enable the capture of IP packets sent between onetun and the WireGuard peer.
The output is a libpcap capture file that can be viewed with Wireshark.
```shell
onetun --pcap wg.pcap 127.0.0.1:8080:192.168.4.2:8080
INFO onetun::pcap > Capturing WireGuard IP packets to wg.pcap
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
To capture packets sent to and from the onetun local port, you must use an external tool like `tcpdump` with root access:
```shell
sudo tcpdump -i lo -w local.pcap 'dst 127.0.0.1 && port 8080'
```
### WireGuard Options
By default, onetun will create the UDP socket to communicate with the WireGuard endpoint on all interfaces and on a dynamic port,
i.e. `0.0.0.0:0` for IPv4 endpoints, or `[::]:0` for IPv6.
You can bind to a static address instead using `--endpoint-bind-addr`:
```shell
onetun --endpoint-bind-addr 0.0.0.0:51820 --endpoint-addr 140.30.3.182:51820 [...]
```
The security of the WireGuard connection can be further enhanced with a **pre-shared key** (PSK). You can generate such a key with the `wg genpsk` command, and provide it using `--preshared-key`.
The peer must also have this key configured using the `PresharedKey` option.
```shell
onetun --preshared-key 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' [...]
$ git clone https://github.com/aramperes/onetun && cd onetun
$ cargo build --release
$ ./target/release/onetun
```
## Architecture
**In short:** onetun uses [smoltcp's](https://github.com/smoltcp-rs/smoltcp) TCP/IP and UDP stack to generate IP packets
using its state machine ("virtual interface"). The generated IP packets are
encrypted by [boringtun](https://github.com/cloudflare/boringtun) and sent to the WireGuard endpoint. Encrypted IP packets received
from the WireGuard endpoint are decrypted using boringtun and sent through the smoltcp virtual interface state machine.
onetun creates "virtual sockets" in the virtual interface to forward data sent from inbound connections,
as well as to receive data from the virtual interface to forward back to the local client.
---
onetun uses [tokio](https://github.com/tokio-rs/tokio), the async runtime, to listen for new TCP connections on the
given port.
When a client connects to the onetun's TCP port, a "virtual client" is
created in a [smoltcp](https://github.com/smoltcp-rs/smoltcp) "virtual" TCP/IP interface, which runs fully inside the onetun
process. An ephemeral "virtual port" is assigned to the "virtual client", which maps back to the local client.
When a client connects to the local TCP port, it uses [smoltcp](https://github.com/smoltcp-rs/smoltcp) to
create a "virtual interface", with a "virtual client" and a "virtual server" for the connection. These "virtual"
components are the crux of how onetun works. They essentially replace the host's TCP/IP stack with smoltcp's, which
fully runs inside onetun. An ephemeral "virtual port" is also assigned to the connection, in order to route packets
back to the right connection.
When the real client opens the connection, the virtual client socket opens a TCP connection to the virtual server
(a dummy socket bound to the remote host/port). The virtual interface in turn crafts the `SYN` segment and wraps it in an IP packet.
When the real client opens the connection, the virtual client socket opens a TCP connection to the virtual server.
The virtual interface (implemented by smoltcp) in turn crafts the `SYN` segment and wraps it in an IP packet.
Because of how the virtual client and server are configured, the IP packet is crafted with a source address
being the configured `source-peer-ip` (`192.168.4.3` in the example above),
and the destination address matches the port-forward's configured destination (`192.168.4.2`).
and the destination address is the remote peer's (`192.168.4.2`).
By doing this, we let smoltcp handle the crafting of the IP packets, and the handling of the client's TCP states.
Instead of actually sending those packets to the virtual server,
@ -236,13 +125,13 @@ Once the WireGuard endpoint receives an encrypted IP packet, it decrypts it usin
It reads the destination address, re-encrypts the IP packet using the matching peer's public key, and sends it off to
the peer's UDP endpoint.
The peer receives the encrypted IP and decrypts it. It can then read the inner payload (the TCP segment),
forward it to the server's port, which handles the TCP segment. The TCP server responds with `SYN-ACK`, which goes back through
The remote peer receives the encrypted IP and decrypts it. It can then read the inner payload (the TCP segment),
forward it to the server's port, which handles the TCP segment. The server responds with `SYN-ACK`, which goes back through
the peer's local WireGuard interface, gets encrypted, forwarded to the WireGuard endpoint, and then finally back to onetun's UDP port.
When onetun receives an encrypted packet from the WireGuard endpoint, it decrypts it using boringtun.
The resulting IP packet is dispatched to the corresponding virtual interface running inside onetun;
the IP packet is then read and processed by the virtual interface, and the virtual client's TCP state is updated.
The resulting IP packet is broadcasted to all virtual interfaces running inside onetun; once the corresponding
interface is matched, the IP packet is read and unpacked, and the virtual client's TCP state is updated.
Whenever data is sent by the real client, it is simply "sent" by the virtual client, which kicks off the whole IP encapsulation
and WireGuard encryption again. When data is sent by the real server, it ends up routed in the virtual interface, which allows
@ -251,72 +140,6 @@ the virtual client to read it. When the virtual client reads data, it simply pus
This work is all made possible by [smoltcp](https://github.com/smoltcp-rs/smoltcp) and [boringtun](https://github.com/cloudflare/boringtun),
so special thanks to the developers of those libraries.
### UDP
UDP support is experimental. Since UDP messages are stateless, there is no perfect way for onetun to know when to release the
assigned virtual port back to the pool for a new peer to use. This would cause issues over time as running out of virtual ports
would mean new datagrams get dropped. To alleviate this, onetun will cap the amount of ports used by one peer IP address;
if another datagram comes in from a different port but with the same IP, the least recently used virtual port will be freed and assigned
to the new peer port. At that point, any datagram packets destined for the reused virtual port will be routed to the new peer,
and any datagrams received by the old peer will be dropped.
In addition, in cases where many IPs are exhausting the UDP virtual port pool in tandem, and a totally new peer IP sends data,
onetun will have to pick the least recently used virtual port from _any_ peer IP and reuse it. However, this is only allowed
if the least recently used port hasn't been used for a certain amount of time. If all virtual ports are truly "active"
(with at least one transmission within that time limit), the new datagram gets dropped due to exhaustion.
All in all, I would not recommend using UDP forwarding for public services, since it's most likely prone to simple DoS or DDoS.
## HTTP/SOCKS Proxy
**onetun** is a Transport-layer proxy (also known as port forwarding); it is not in scope to provide
a HTTP/SOCKS proxy server. However, you can easily chain **onetun** with a proxy server on a remote
that is locked down to your WireGuard network.
For example, you could run [dante-server](https://www.inet.no/dante/) on a peer (ex. `192.168.4.2`) with the following configuration:
```
# /etc/danted.conf
logoutput: syslog
user.privileged: root
user.unprivileged: nobody
internal: 192.168.4.2 port=1080
external: eth0
socksmethod: none
clientmethod: none
# Locks down proxy use to WireGuard peers (192.168.4.x)
client pass {
from: 192.168.4.0/24 to: 0.0.0.0/0
}
socks pass {
from: 192.168.4.0/24 to: 0.0.0.0/0
}
```
Then use **onetun** to expose the SOCKS5 proxy locally:
```shell
onetun 127.0.0.1:1080:192.168.4.2:1080
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:1080]->[192.168.4.2:1080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Test with `curl` (or configure your browser):
```shell
curl -x socks5://127.0.0.1:1080 https://ifconfig.me
```
## Contributing and Maintenance
I will gladly accept contributions to onetun, and set aside time to review all pull-requests.
Please consider opening a GitHub issue if you are unsure if your contribution is within the scope of the project.
**Disclaimer**: I do not have enough personal time to actively maintain onetun besides open-source contributions.
## License
MIT License. See `LICENSE` for details. Copyright &copy; 2025 Aram Peres.
MIT. See `LICENSE` for details.

View file

@ -1,339 +1,130 @@
use std::collections::HashSet;
use std::convert::TryFrom;
use std::fmt::{Display, Formatter};
use std::fs::read_to_string;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use std::sync::Arc;
use anyhow::{bail, Context};
pub use boringtun::x25519::{PublicKey, StaticSecret};
use anyhow::Context;
use boringtun::crypto::{X25519PublicKey, X25519SecretKey};
use clap::{App, Arg};
const DEFAULT_PORT_FORWARD_SOURCE: &str = "127.0.0.1";
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct Config {
pub port_forwards: Vec<PortForwardConfig>,
#[allow(dead_code)]
pub remote_port_forwards: Vec<PortForwardConfig>,
pub private_key: Arc<StaticSecret>,
pub endpoint_public_key: Arc<PublicKey>,
pub preshared_key: Option<[u8; 32]>,
pub endpoint_addr: SocketAddr,
pub endpoint_bind_addr: SocketAddr,
pub source_peer_ip: IpAddr,
pub keepalive_seconds: Option<u16>,
pub max_transmission_unit: usize,
pub log: String,
pub warnings: Vec<String>,
pub pcap_file: Option<String>,
pub(crate) source_addr: SocketAddr,
pub(crate) dest_addr: SocketAddr,
pub(crate) private_key: Arc<X25519SecretKey>,
pub(crate) endpoint_public_key: Arc<X25519PublicKey>,
pub(crate) endpoint_addr: SocketAddr,
pub(crate) source_peer_ip: IpAddr,
pub(crate) keepalive_seconds: Option<u16>,
pub(crate) log: String,
}
impl Config {
#[cfg(feature = "bin")]
pub fn from_args() -> anyhow::Result<Self> {
use clap::{Arg, Command};
let mut warnings = vec![];
let matches = Command::new("onetun")
let matches = App::new("onetun")
.author("Aram Peres <aram.peres@gmail.com>")
.version(env!("CARGO_PKG_VERSION"))
.args(&[
Arg::new("PORT_FORWARD")
.required(false)
.num_args(1..)
.help("Port forward configurations. The format of each argument is [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \
where [src_host] is the local IP to listen on, <src_port> is the local port to listen on, <dst_host> is the remote peer IP to forward to, and <dst_port> is the remote port to forward to. \
Environment variables of the form 'ONETUN_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\
Examples:\n\
\t127.0.0.1:8080:192.168.4.1:8081:TCP,UDP\n\
\t127.0.0.1:8080:192.168.4.1:8081:TCP\n\
\t0.0.0.0:8080:192.168.4.1:8081\n\
\t[::1]:8080:192.168.4.1:8081\n\
\t8080:192.168.4.1:8081\n\
\t8080:192.168.4.1:8081:TCP\n\
\tlocalhost:8080:192.168.4.1:8081:TCP\n\
\tlocalhost:8080:peer.intranet:8081:TCP\
"),
Arg::new("private-key")
.conflicts_with("private-key-file")
.num_args(1)
Arg::with_name("SOURCE_ADDR")
.required(true)
.takes_value(true)
.env("ONETUN_SOURCE_ADDR")
.help("The source address (IP + port) to forward from. Example: 127.0.0.1:2115"),
Arg::with_name("DESTINATION_ADDR")
.required(true)
.takes_value(true)
.env("ONETUN_DESTINATION_ADDR")
.help("The destination address (IP + port) to forward to. The IP should be a peer registered in the Wireguard endpoint. Example: 192.168.4.2:2116"),
Arg::with_name("private-key")
.required(true)
.takes_value(true)
.long("private-key")
.env("ONETUN_PRIVATE_KEY")
.help("The private key of this peer. The corresponding public key should be registered in the WireGuard endpoint. \
You can also use '--private-key-file' to specify a file containing the key instead."),
Arg::new("private-key-file")
.num_args(1)
.long("private-key-file")
.env("ONETUN_PRIVATE_KEY_FILE")
.help("The path to a file containing the private key of this peer. The corresponding public key should be registered in the WireGuard endpoint."),
Arg::new("endpoint-public-key")
.help("The private key of this peer. The corresponding public key should be registered in the Wireguard endpoint."),
Arg::with_name("endpoint-public-key")
.required(true)
.num_args(1)
.takes_value(true)
.long("endpoint-public-key")
.env("ONETUN_ENDPOINT_PUBLIC_KEY")
.help("The public key of the WireGuard endpoint (remote)."),
Arg::new("preshared-key")
.required(false)
.num_args(1)
.long("preshared-key")
.env("ONETUN_PRESHARED_KEY")
.help("The pre-shared key (PSK) as configured with the peer."),
Arg::new("endpoint-addr")
.help("The public key of the Wireguard endpoint (remote)."),
Arg::with_name("endpoint-addr")
.required(true)
.num_args(1)
.takes_value(true)
.long("endpoint-addr")
.env("ONETUN_ENDPOINT_ADDR")
.help("The address (IP + port) of the WireGuard endpoint (remote). Example: 1.2.3.4:51820"),
Arg::new("endpoint-bind-addr")
.required(false)
.num_args(1)
.long("endpoint-bind-addr")
.env("ONETUN_ENDPOINT_BIND_ADDR")
.help("The address (IP + port) used to bind the local UDP socket for the WireGuard tunnel. Example: 1.2.3.4:30000. Defaults to 0.0.0.0:0 for IPv4 endpoints, or [::]:0 for IPv6 endpoints."),
Arg::new("source-peer-ip")
.help("The address (IP + port) of the Wireguard endpoint (remote). Example: 1.2.3.4:51820"),
Arg::with_name("source-peer-ip")
.required(true)
.num_args(1)
.takes_value(true)
.long("source-peer-ip")
.env("ONETUN_SOURCE_PEER_IP")
.help("The source IP to identify this peer as (local). Example: 192.168.4.3"),
Arg::new("keep-alive")
Arg::with_name("keep-alive")
.required(false)
.num_args(1)
.takes_value(true)
.long("keep-alive")
.env("ONETUN_KEEP_ALIVE")
.help("Configures a persistent keep-alive for the WireGuard tunnel, in seconds."),
Arg::new("max-transmission-unit")
Arg::with_name("log")
.required(false)
.num_args(1)
.long("max-transmission-unit")
.env("ONETUN_MTU")
.default_value("1420")
.help("Configures the max-transmission-unit (MTU) of the WireGuard tunnel."),
Arg::new("log")
.required(false)
.num_args(1)
.takes_value(true)
.long("log")
.env("ONETUN_LOG")
.default_value("info")
.help("Configures the log level and format."),
Arg::new("pcap")
.required(false)
.num_args(1)
.long("pcap")
.env("ONETUN_PCAP")
.help("Decrypts and captures IP packets on the WireGuard tunnel to a given output file."),
Arg::new("remote")
.required(false)
.num_args(1..)
.long("remote")
.short('r')
.help("Remote port forward configurations. The format of each argument is <src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \
where <src_port> is the port the other peers will reach the server with, <dst_host> is the IP to forward to, and <dst_port> is the port to forward to. \
The <src_port> will be bound on onetun's peer IP, as specified by --source-peer-ip. If you pass a different value for <src_host> here, it will be rejected.\n\
Note: <dst_host>:<dst_port> must be reachable by onetun. If referring to another WireGuard peer, use --bridge instead (not supported yet).\n\
Environment variables of the form 'ONETUN_REMOTE_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\
Examples:\n\
\t--remote 8080:localhost:8081:TCP,UDP\n\
\t--remote 8080:[::1]:8081:TCP\n\
\t--remote 8080:google.com:80\
"),
.help("Configures the log level and format.")
]).get_matches();
// Combine `PORT_FORWARD` arg and `ONETUN_PORT_FORWARD_#` envs
let mut port_forward_strings = HashSet::new();
if let Some(values) = matches.get_many::<String>("PORT_FORWARD") {
for value in values {
port_forward_strings.insert(value.to_owned());
}
}
for n in 1.. {
if let Ok(env) = std::env::var(format!("ONETUN_PORT_FORWARD_{}", n)) {
port_forward_strings.insert(env);
} else {
break;
}
}
// Parse `PORT_FORWARD` strings into `PortForwardConfig`
let port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> = port_forward_strings
.into_iter()
.map(|s| PortForwardConfig::from_notation(&s, DEFAULT_PORT_FORWARD_SOURCE))
.collect();
let port_forwards: Vec<PortForwardConfig> = port_forwards
.context("Failed to parse port forward config")?
.into_iter()
.flatten()
.collect();
// Read source-peer-ip
let source_peer_ip = parse_ip(matches.get_one::<String>("source-peer-ip"))
.context("Invalid source peer IP")?;
// Combined `remote` arg and `ONETUN_REMOTE_PORT_FORWARD_#` envs
let mut port_forward_strings = HashSet::new();
if let Some(values) = matches.get_many::<String>("remote") {
for value in values {
port_forward_strings.insert(value.to_owned());
}
}
for n in 1.. {
if let Ok(env) = std::env::var(format!("ONETUN_REMOTE_PORT_FORWARD_{}", n)) {
port_forward_strings.insert(env);
} else {
break;
}
}
// Parse `PORT_FORWARD` strings into `PortForwardConfig`
let remote_port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> =
port_forward_strings
.into_iter()
.map(|s| {
PortForwardConfig::from_notation(
&s,
matches.get_one::<String>("source-peer-ip").unwrap(),
)
})
.collect();
let mut remote_port_forwards: Vec<PortForwardConfig> = remote_port_forwards
.context("Failed to parse remote port forward config")?
.into_iter()
.flatten()
.collect();
for port_forward in remote_port_forwards.iter_mut() {
if port_forward.source.ip() != source_peer_ip {
bail!("Remote port forward config <src_host> must match --source-peer-ip ({}), or be omitted.", source_peer_ip);
}
port_forward.source = SocketAddr::from((source_peer_ip, port_forward.source.port()));
port_forward.remote = true;
}
if port_forwards.is_empty() && remote_port_forwards.is_empty() {
bail!("No port forward configurations given.");
}
// Read private key from file or CLI argument
let (group_readable, world_readable) = matches
.get_one::<String>("private-key-file")
.and_then(is_file_insecurely_readable)
.unwrap_or_default();
if group_readable {
warnings.push("Private key file is group-readable. This is insecure.".into());
}
if world_readable {
warnings.push("Private key file is world-readable. This is insecure.".into());
}
let private_key = if let Some(private_key_file) =
matches.get_one::<String>("private-key-file")
{
read_to_string(private_key_file)
.map(|s| s.trim().to_string())
.context("Failed to read private key file")
} else {
if std::env::var("ONETUN_PRIVATE_KEY").is_err() {
warnings.push("Private key was passed using CLI. This is insecure. \
Use \"--private-key-file <file containing private key>\", or the \"ONETUN_PRIVATE_KEY\" env variable instead.".into());
}
matches
.get_one::<String>("private-key")
.cloned()
.context("Missing private key")
}?;
let endpoint_addr = parse_addr(matches.get_one::<String>("endpoint-addr"))
.context("Invalid endpoint address")?;
let endpoint_bind_addr = if let Some(addr) = matches.get_one::<String>("endpoint-bind-addr")
{
let addr = parse_addr(Some(addr)).context("Invalid bind address")?;
// Make sure the bind address and endpoint address are the same IP version
if addr.ip().is_ipv4() != endpoint_addr.ip().is_ipv4() {
bail!("Endpoint and bind addresses must be the same IP version");
}
addr
} else {
// Return the IP version of the endpoint address
match endpoint_addr {
SocketAddr::V4(_) => parse_addr(Some("0.0.0.0:0"))?,
SocketAddr::V6(_) => parse_addr(Some("[::]:0"))?,
}
};
Ok(Self {
port_forwards,
remote_port_forwards,
private_key: Arc::new(parse_private_key(&private_key).context("Invalid private key")?),
endpoint_public_key: Arc::new(
parse_public_key(matches.get_one::<String>("endpoint-public-key"))
.context("Invalid endpoint public key")?,
source_addr: parse_addr(matches.value_of("SOURCE_ADDR"))
.with_context(|| "Invalid source address")?,
dest_addr: parse_addr(matches.value_of("DESTINATION_ADDR"))
.with_context(|| "Invalid destination address")?,
private_key: Arc::new(
parse_private_key(matches.value_of("private-key"))
.with_context(|| "Invalid private key")?,
),
preshared_key: parse_preshared_key(matches.get_one::<String>("preshared-key"))?,
endpoint_addr,
endpoint_bind_addr,
source_peer_ip,
keepalive_seconds: parse_keep_alive(matches.get_one::<String>("keep-alive"))
.context("Invalid keep-alive value")?,
max_transmission_unit: parse_mtu(matches.get_one::<String>("max-transmission-unit"))
.context("Invalid max-transmission-unit value")?,
log: matches
.get_one::<String>("log")
.cloned()
.unwrap_or_default(),
pcap_file: matches.get_one::<String>("pcap").cloned(),
warnings,
endpoint_public_key: Arc::new(
parse_public_key(matches.value_of("endpoint-public-key"))
.with_context(|| "Invalid endpoint public key")?,
),
endpoint_addr: parse_addr(matches.value_of("endpoint-addr"))
.with_context(|| "Invalid endpoint address")?,
source_peer_ip: parse_ip(matches.value_of("source-peer-ip"))
.with_context(|| "Invalid source peer IP")?,
keepalive_seconds: parse_keep_alive(matches.value_of("keep-alive"))
.with_context(|| "Invalid keep-alive value")?,
log: matches.value_of("log").unwrap_or_default().into(),
})
}
}
fn parse_addr<T: AsRef<str>>(s: Option<T>) -> anyhow::Result<SocketAddr> {
s.context("Missing address")?
.as_ref()
fn parse_addr(s: Option<&str>) -> anyhow::Result<SocketAddr> {
s.with_context(|| "Missing address")?
.to_socket_addrs()
.context("Invalid address")?
.with_context(|| "Invalid address")?
.next()
.context("Could not lookup address")
.with_context(|| "Could not lookup address")
}
fn parse_ip(s: Option<&String>) -> anyhow::Result<IpAddr> {
s.context("Missing IP address")?
fn parse_ip(s: Option<&str>) -> anyhow::Result<IpAddr> {
s.with_context(|| "Missing IP")?
.parse::<IpAddr>()
.context("Invalid IP address")
.with_context(|| "Invalid IP address")
}
fn parse_private_key(s: &str) -> anyhow::Result<StaticSecret> {
let decoded = base64::decode(s).context("Failed to decode private key")?;
if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(StaticSecret::from(bytes))
} else {
bail!("Invalid private key")
}
fn parse_private_key(s: Option<&str>) -> anyhow::Result<X25519SecretKey> {
s.with_context(|| "Missing private key")?
.parse::<X25519SecretKey>()
.map_err(|e| anyhow::anyhow!("{}", e))
.with_context(|| "Invalid private key")
}
fn parse_public_key(s: Option<&String>) -> anyhow::Result<PublicKey> {
let encoded = s.context("Missing public key")?;
let decoded = base64::decode(encoded).context("Failed to decode public key")?;
if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(PublicKey::from(bytes))
} else {
bail!("Invalid public key")
}
fn parse_public_key(s: Option<&str>) -> anyhow::Result<X25519PublicKey> {
s.with_context(|| "Missing public key")?
.parse::<X25519PublicKey>()
.map_err(|e| anyhow::anyhow!("{}", e))
.with_context(|| "Invalid public key")
}
/// Decodes the optional base64-encoded WireGuard preshared key into its 32-byte form.
///
/// Returns `Ok(None)` when no key was provided (the preshared key is optional).
fn parse_preshared_key(s: Option<&String>) -> anyhow::Result<Option<[u8; 32]>> {
    let encoded = match s {
        Some(value) => value,
        None => return Ok(None),
    };
    let decoded = base64::decode(encoded).context("Failed to decode preshared key")?;
    // A WireGuard preshared key must be exactly 32 bytes once decoded.
    match decoded.try_into() {
        Ok(bytes) => Ok(Some(bytes)),
        Err(_) => bail!("Invalid preshared key"),
    }
}
fn parse_keep_alive(s: Option<&String>) -> anyhow::Result<Option<u16>> {
fn parse_keep_alive(s: Option<&str>) -> anyhow::Result<Option<u16>> {
if let Some(s) = s {
let parsed: u16 = s.parse().with_context(|| {
format!(
@ -346,369 +137,3 @@ fn parse_keep_alive(s: Option<&String>) -> anyhow::Result<Option<u16>> {
Ok(None)
}
}
/// Parses the max-transmission-unit argument into a `usize`.
fn parse_mtu(s: Option<&String>) -> anyhow::Result<usize> {
    let value = s.context("Missing MTU")?;
    value.parse().context("Invalid MTU")
}
#[cfg(unix)]
/// Checks whether the file at `path` is readable by its group or by the world.
///
/// Returns `Some((group_readable, world_readable))`, or `None` if the file
/// cannot be opened or its metadata cannot be read.
fn is_file_insecurely_readable(path: &String) -> Option<(bool, bool)> {
    use std::fs::File;
    use std::os::unix::fs::MetadataExt;

    // Unix permission bits: 0o040 = group-read, 0o004 = other-read.
    let mode = File::open(path).ok()?.metadata().ok()?.mode();
    let group_readable = mode & 0o040 != 0;
    let world_readable = mode & 0o004 != 0;
    Some((group_readable, world_readable))
}
#[cfg(not(unix))]
/// Fallback for non-Unix targets: permission bits cannot be inspected portably,
/// so no (group, world) readability result is ever produced and no insecure-key
/// warning is emitted.
fn is_file_insecurely_readable(_path: &String) -> Option<(bool, bool)> {
    // No good way to determine permissions on non-Unix target
    None
}
/// A single port-forwarding rule: listen on `source` and forward traffic to
/// `destination` over `protocol` through the WireGuard tunnel.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct PortForwardConfig {
    /// The source IP and port where the local server will run.
    pub source: SocketAddr,
    /// The destination IP and port to which traffic will be forwarded.
    pub destination: SocketAddr,
    /// The transport protocol to use for the port (Layer 4).
    pub protocol: PortProtocol,
    /// Whether this is a remote port forward.
    /// `from_notation` always sets this to `false`; remote forwards are flagged afterwards by the caller.
    pub remote: bool,
}
impl PortForwardConfig {
    /// Converts a string representation into `PortForwardConfig`.
    ///
    /// Sample formats:
    /// - `127.0.0.1:8080:192.168.4.1:8081:TCP,UDP`
    /// - `127.0.0.1:8080:192.168.4.1:8081:TCP`
    /// - `0.0.0.0:8080:192.168.4.1:8081`
    /// - `[::1]:8080:192.168.4.1:8081`
    /// - `8080:192.168.4.1:8081`
    /// - `8080:192.168.4.1:8081:TCP`
    /// - `localhost:8080:192.168.4.1:8081:TCP`
    /// - `localhost:8080:peer.intranet:8081:TCP`
    ///
    /// Implementation Notes:
    /// - The format is formalized as `[src_host:]<src_port>:<dst_host>:<dst_port>[:PROTO1,PROTO2,...]`
    /// - `src_host` is optional and defaults to `127.0.0.1`.
    /// - `src_host` and `dst_host` may be specified as IPv4, IPv6, or a FQDN to be resolved by DNS.
    /// - IPv6 addresses must be prefixed with `[` and suffixed with `]`. Example: `[::1]`.
    /// - Any `u16` is accepted as `src_port` and `dst_port`
    /// - Specifying protocols (`PROTO1,PROTO2,...`) is optional and defaults to `TCP`. Values must be separated by commas.
    ///
    /// Returns one `PortForwardConfig` per requested protocol, all sharing the
    /// same resolved source and destination addresses, with `remote` set to `false`.
    pub fn from_notation(s: &str, default_source: &str) -> anyhow::Result<Vec<PortForwardConfig>> {
        // nom combinators implementing the `[src_host:]<src_port>:<dst_host>:<dst_port>[:PROTOS]` grammar.
        mod parsers {
            use nom::branch::alt;
            use nom::bytes::complete::is_not;
            use nom::character::complete::{alpha1, char, digit1};
            use nom::combinator::{complete, map, opt, success};
            use nom::error::ErrorKind;
            use nom::multi::separated_list1;
            use nom::sequence::{delimited, preceded, separated_pair, tuple};
            use nom::IResult;

            // An IPv6 literal must be bracketed, e.g. `[::1]`.
            fn ipv6(s: &str) -> IResult<&str, &str> {
                delimited(char('['), is_not("]"), char(']'))(s)
            }

            fn ipv4_or_fqdn(s: &str) -> IResult<&str, &str> {
                let s = is_not(":")(s)?;
                if s.1.chars().all(|c| c.is_ascii_digit()) {
                    // If ipv4 or fqdn is all digits, it's not valid: it would be
                    // ambiguous with a bare port number.
                    Err(nom::Err::Error(nom::error::ParseError::from_error_kind(
                        s.1,
                        ErrorKind::Fail,
                    )))
                } else {
                    Ok(s)
                }
            }

            fn port(s: &str) -> IResult<&str, &str> {
                digit1(s)
            }

            fn ip_or_fqdn(s: &str) -> IResult<&str, &str> {
                alt((ipv6, ipv4_or_fqdn))(s)
            }

            // Matches "no host given" so `src_addr` can treat the host as optional.
            fn no_ip(s: &str) -> IResult<&str, Option<&str>> {
                success(None)(s)
            }

            fn src_addr(s: &str) -> IResult<&str, (Option<&str>, &str)> {
                let with_ip = separated_pair(map(ip_or_fqdn, Some), char(':'), port);
                let without_ip = tuple((no_ip, port));
                alt((with_ip, without_ip))(s)
            }

            fn dst_addr(s: &str) -> IResult<&str, (&str, &str)> {
                separated_pair(ip_or_fqdn, char(':'), port)(s)
            }

            fn protocol(s: &str) -> IResult<&str, &str> {
                alpha1(s)
            }

            // Optional trailing `:PROTO1,PROTO2,...` list.
            fn protocols(s: &str) -> IResult<&str, Option<Vec<&str>>> {
                opt(preceded(char(':'), separated_list1(char(','), protocol)))(s)
            }

            #[allow(clippy::type_complexity)]
            pub fn port_forward(
                s: &str,
            ) -> IResult<&str, ((Option<&str>, &str), (), (&str, &str), Option<Vec<&str>>)>
            {
                complete(tuple((
                    src_addr,
                    map(char(':'), |_| ()),
                    dst_addr,
                    protocols,
                )))(s)
            }
        }

        // TODO: Could improve error management with custom errors, so that the messages are more helpful.
        let (src_addr, _, dst_addr, protocols) = parsers::port_forward(s)
            .map_err(|e| anyhow::anyhow!("Invalid port-forward definition: {}", e))?
            .1;

        let source = (
            src_addr.0.unwrap_or(default_source),
            src_addr.1.parse::<u16>().context("Invalid source port")?,
        )
            .to_socket_addrs()
            .context("Invalid source address")?
            .next()
            .context("Could not resolve source address")?;

        let destination = (
            dst_addr.0,
            // Fix: this previously reported "Invalid source port" for a bad destination port.
            dst_addr
                .1
                .parse::<u16>()
                .context("Invalid destination port")?,
        )
            .to_socket_addrs() // TODO: Pass this as given and use DNS config instead (issue #15)
            .context("Invalid destination address")?
            .next()
            .context("Could not resolve destination address")?;

        // Parse protocols (defaults to TCP when the list is omitted)
        let protocols = if let Some(protocols) = protocols {
            let protocols: anyhow::Result<Vec<PortProtocol>> =
                protocols.into_iter().map(PortProtocol::try_from).collect();
            protocols
        } else {
            Ok(vec![PortProtocol::Tcp])
        }
        .context("Failed to parse protocols")?;

        // Returns a config for each selected protocol
        Ok(protocols
            .into_iter()
            .map(|protocol| Self {
                source,
                destination,
                protocol,
                remote: false,
            })
            .collect())
    }
}
impl Display for PortForwardConfig {
    /// Formats as `source:destination:protocol`, prefixed with `(remote)` for remote forwards.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let prefix = if self.remote { "(remote)" } else { "" };
        write!(
            f,
            "{}{}:{}:{}",
            prefix, self.source, self.destination, self.protocol
        )
    }
}
/// Layer 4 (transport) protocols for ports.
/// (Matches the `PortForwardConfig::protocol` field, documented as "Layer 4".)
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub enum PortProtocol {
    /// Transmission Control Protocol
    Tcp,
    /// User Datagram Protocol
    Udp,
}
impl TryFrom<&str> for PortProtocol {
    type Error = anyhow::Error;

    /// Parses a case-insensitive protocol name (`"TCP"` / `"UDP"`).
    fn try_from(value: &str) -> anyhow::Result<Self> {
        let normalized = value.to_uppercase();
        if normalized == "TCP" {
            Ok(Self::Tcp)
        } else if normalized == "UDP" {
            Ok(Self::Udp)
        } else {
            Err(anyhow::anyhow!("Invalid protocol specifier: {}", value))
        }
    }
}
impl Display for PortProtocol {
    /// Writes the canonical uppercase protocol name.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::Tcp => "TCP",
            Self::Udp => "UDP",
        };
        f.write_str(name)
    }
}
#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use super::*;

    /// A multi-protocol spec (`TCP,UDP`) expands into one config per protocol,
    /// both sharing the same source and destination.
    #[test]
    fn test_parse_port_forward_config_1() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "192.168.0.1:8080:192.168.4.1:8081:TCP,UDP",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![
                PortForwardConfig {
                    source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
                    destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                    protocol: PortProtocol::Tcp,
                    remote: false,
                },
                PortForwardConfig {
                    source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
                    destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                    protocol: PortProtocol::Udp,
                    remote: false,
                }
            ]
        );
    }

    /// An explicit IPv4 source host with a single explicit protocol.
    #[test]
    fn test_parse_port_forward_config_2() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "192.168.0.1:8080:192.168.4.1:8081:TCP",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
                destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }

    /// A wildcard (`0.0.0.0`) source host; the protocol defaults to TCP when omitted.
    #[test]
    fn test_parse_port_forward_config_3() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "0.0.0.0:8080:192.168.4.1:8081",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: SocketAddr::from_str("0.0.0.0:8080").unwrap(),
                destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }

    /// An IPv6 source host in bracket notation (`[::1]`).
    #[test]
    fn test_parse_port_forward_config_4() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "[::1]:8080:192.168.4.1:8081",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: SocketAddr::from_str("[::1]:8080").unwrap(),
                destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }

    /// An omitted source host falls back to the provided default (`127.0.0.1`).
    #[test]
    fn test_parse_port_forward_config_5() {
        assert_eq!(
            PortForwardConfig::from_notation("8080:192.168.4.1:8081", DEFAULT_PORT_FORWARD_SOURCE)
                .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: SocketAddr::from_str("127.0.0.1:8080").unwrap(),
                destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }

    /// An omitted source host combined with an explicit protocol.
    #[test]
    fn test_parse_port_forward_config_6() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "8080:192.168.4.1:8081:TCP",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: SocketAddr::from_str("127.0.0.1:8080").unwrap(),
                destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }

    /// A source hostname (`localhost`) resolved via DNS.
    #[test]
    fn test_parse_port_forward_config_7() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "localhost:8080:192.168.4.1:8081",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(),
                destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }

    /// Hostnames on both the source and the destination, both resolved via DNS.
    #[test]
    fn test_parse_port_forward_config_8() {
        assert_eq!(
            PortForwardConfig::from_notation(
                "localhost:8080:localhost:8081:TCP",
                DEFAULT_PORT_FORWARD_SOURCE
            )
            .expect("Failed to parse"),
            vec![PortForwardConfig {
                source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(),
                destination: "localhost:8081".to_socket_addrs().unwrap().next().unwrap(),
                protocol: PortProtocol::Tcp,
                remote: false,
            }]
        );
    }
}

View file

@ -1,190 +0,0 @@
use bytes::Bytes;
use std::fmt::{Display, Formatter};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use crate::config::PortForwardConfig;
use crate::virtual_iface::VirtualPort;
use crate::PortProtocol;
/// Events that go on the bus between the local server, smoltcp, and WireGuard.
#[derive(Debug, Clone)]
pub enum Event {
    /// Dumb event with no data. (Exercised by the bus unit tests.)
    Dumb,
    /// A new connection with the local server was initiated, and the given virtual port was assigned.
    ClientConnectionInitiated(PortForwardConfig, VirtualPort),
    /// A connection was dropped from the pool and should be closed in all interfaces.
    ClientConnectionDropped(VirtualPort),
    /// Data received by the local server that should be sent to the virtual server.
    LocalData(PortForwardConfig, VirtualPort, Bytes),
    /// Data received by the remote server that should be sent to the local client.
    RemoteData(VirtualPort, Bytes),
    /// IP packet received from the WireGuard tunnel that should be passed through the corresponding virtual device.
    InboundInternetPacket(PortProtocol, Bytes),
    /// IP packet to be sent through the WireGuard tunnel as crafted by the virtual device.
    OutboundInternetPacket(Bytes),
    /// Notifies that a virtual device read an IP packet.
    VirtualDeviceFed(PortProtocol),
}
impl Display for Event {
    /// Renders a compact human-readable summary of the event, used for trace logging.
    /// Payload bytes are summarized by their length rather than printed.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            Event::Dumb => write!(f, "Dumb{{}}"),
            Event::ClientConnectionInitiated(pf, vp) => {
                write!(f, "ClientConnectionInitiated{{ pf={} vp={} }}", pf, vp)
            }
            Event::ClientConnectionDropped(vp) => {
                write!(f, "ClientConnectionDropped{{ vp={} }}", vp)
            }
            Event::LocalData(pf, vp, data) => {
                write!(f, "LocalData{{ pf={} vp={} size={} }}", pf, vp, data.len())
            }
            Event::RemoteData(vp, data) => {
                write!(f, "RemoteData{{ vp={} size={} }}", vp, data.len())
            }
            Event::InboundInternetPacket(proto, data) => write!(
                f,
                "InboundInternetPacket{{ proto={} size={} }}",
                proto,
                data.len()
            ),
            Event::OutboundInternetPacket(data) => {
                write!(f, "OutboundInternetPacket{{ size={} }}", data.len())
            }
            Event::VirtualDeviceFed(proto) => {
                write!(f, "VirtualDeviceFed{{ proto={} }}", proto)
            }
        }
    }
}
/// The event bus: a broadcast channel shared by all endpoints.
/// Cloning a `Bus` is cheap (both fields are `Arc`s) and shares the same channel.
#[derive(Clone)]
pub struct Bus {
    /// Monotonic counter used to assign each endpoint a unique sequential ID.
    counter: Arc<AtomicU32>,
    /// Broadcast sender; each new endpoint subscribes to it for its receiver.
    bus: Arc<tokio::sync::broadcast::Sender<(u32, Event)>>,
}
impl Bus {
    /// Creates a new event bus backed by a broadcast channel of capacity 1000.
    pub fn new() -> Self {
        let (sender, _initial_rx) = tokio::sync::broadcast::channel(1000);
        Self {
            counter: Arc::new(AtomicU32::default()),
            bus: Arc::new(sender),
        }
    }

    /// Creates a new endpoint on the event bus, assigning it the next sequential ID.
    pub fn new_endpoint(&self) -> BusEndpoint {
        let id = self.counter.fetch_add(1, Ordering::Relaxed);
        let rx = self.bus.subscribe();
        let tx = BusSender {
            id,
            tx: (*self.bus).clone(),
        };
        BusEndpoint { id, tx, rx }
    }
}
impl Default for Bus {
    /// Delegates to [`Bus::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// A per-consumer handle on the [`Bus`]: owns a receiver plus a sender tagged
/// with this endpoint's ID so that its own messages can be skipped on receive.
pub struct BusEndpoint {
    // Unique sequential ID assigned by the Bus at creation.
    id: u32,
    // Sending half, tagged with `id`.
    tx: BusSender,
    // Subscription to the shared broadcast channel.
    rx: tokio::sync::broadcast::Receiver<(u32, Event)>,
}
impl BusEndpoint {
/// Sends the event on the bus. Note that the messages sent by this endpoint won't reach itself.
pub fn send(&self, event: Event) {
self.tx.send(event)
}
/// Returns the unique sequential ID of this endpoint.
pub fn id(&self) -> u32 {
self.id
}
/// Awaits the next `Event` on the bus to be read.
pub async fn recv(&mut self) -> Event {
loop {
match self.rx.recv().await {
Ok((id, event)) => {
if id == self.id {
// If the event was sent by this endpoint, it is skipped
continue;
} else {
return event;
}
}
Err(_) => {
error!("Failed to read event bus from endpoint #{}", self.id);
return futures::future::pending().await;
}
}
}
}
/// Creates a new sender for this endpoint that can be cloned.
pub fn sender(&self) -> BusSender {
self.tx.clone()
}
}
/// A cloneable sending half of a bus endpoint. Events it publishes carry the
/// originating endpoint's ID so that endpoint's receiver can skip them.
#[derive(Clone)]
pub struct BusSender {
    // ID of the endpoint this sender was created from.
    id: u32,
    // Shared broadcast sender.
    tx: tokio::sync::broadcast::Sender<(u32, Event)>,
}
impl BusSender {
/// Sends the event on the bus. Note that the messages sent by this endpoint won't reach itself.
pub fn send(&self, event: Event) {
trace!("#{} -> {}", self.id, event);
match self.tx.send((self.id, event)) {
Ok(_) => {}
Err(_) => error!("Failed to send event to bus from endpoint #{}", self.id),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Endpoints get sequential IDs, and an event published by one endpoint is
    /// delivered to every other endpoint but never echoed back to the sender.
    #[tokio::test]
    async fn test_bus() {
        let bus = Bus::new();
        let mut endpoint_1 = bus.new_endpoint();
        let mut endpoint_2 = bus.new_endpoint();
        let mut endpoint_3 = bus.new_endpoint();

        // IDs are assigned sequentially starting at 0.
        assert_eq!(endpoint_1.id(), 0);
        assert_eq!(endpoint_2.id(), 1);
        assert_eq!(endpoint_3.id(), 2);

        // An event from endpoint 1 reaches endpoints 2 and 3.
        endpoint_1.send(Event::Dumb);
        let recv_2 = endpoint_2.recv().await;
        let recv_3 = endpoint_3.recv().await;
        assert!(matches!(recv_2, Event::Dumb));
        assert!(matches!(recv_3, Event::Dumb));

        // An event from endpoint 2 reaches endpoints 1 and 3.
        endpoint_2.send(Event::Dumb);
        let recv_1 = endpoint_1.recv().await;
        let recv_3 = endpoint_3.recv().await;
        assert!(matches!(recv_1, Event::Dumb));
        assert!(matches!(recv_3, Event::Dumb));
    }
}

View file

@ -1,122 +0,0 @@
#[macro_use]
extern crate log;
use std::sync::Arc;
use anyhow::Context;
use crate::config::{Config, PortProtocol};
use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::tcp::TcpVirtualInterface;
use crate::virtual_iface::udp::UdpVirtualInterface;
use crate::virtual_iface::VirtualInterfacePoll;
use crate::wg::WireGuardTunnel;
pub mod config;
pub mod events;
#[cfg(feature = "pcap")]
pub mod pcap;
pub mod tunnel;
pub mod virtual_device;
pub mod virtual_iface;
pub mod wg;
/// Starts the onetun tunnels in separate tokio tasks.
///
/// Spawns, in order: the optional pcap capture task, the three WireGuard tasks
/// (routine, consume, produce), one virtual interface per protocol actually in
/// use, and one forwarding task per configured port-forward.
///
/// Note: This future completes immediately.
pub async fn start_tunnels(config: Config, bus: Bus) -> anyhow::Result<()> {
    // Initialize the port pool for each protocol
    let tcp_port_pool = TcpPortPool::new();
    let udp_port_pool = UdpPortPool::new();

    #[cfg(feature = "pcap")]
    if let Some(pcap_file) = config.pcap_file.clone() {
        // Start packet capture
        let bus = bus.clone();
        tokio::spawn(async move { pcap::capture(pcap_file, bus).await });
    }

    let wg = WireGuardTunnel::new(&config, bus.clone())
        .await
        .context("Failed to initialize WireGuard tunnel")?;
    let wg = Arc::new(wg);

    {
        // Start routine task for WireGuard
        let wg = wg.clone();
        tokio::spawn(async move { wg.routine_task().await });
    }

    {
        // Start consumption task for WireGuard
        let wg = wg.clone();
        // NOTE(review): only this future is boxed (Box::pin) — presumably because
        // it is large; confirm before changing.
        tokio::spawn(Box::pin(async move { wg.consume_task().await }));
    }

    {
        // Start production task for WireGuard
        let wg = wg.clone();
        tokio::spawn(async move { wg.produce_task().await });
    }

    // One virtual device + interface per protocol, created only if some
    // port-forward actually uses that protocol.
    if config
        .port_forwards
        .iter()
        .any(|pf| pf.protocol == PortProtocol::Tcp)
    {
        // TCP device
        let bus = bus.clone();
        let device =
            VirtualIpDevice::new(PortProtocol::Tcp, bus.clone(), config.max_transmission_unit);
        // Start TCP Virtual Interface
        let port_forwards = config.port_forwards.clone();
        let iface = TcpVirtualInterface::new(port_forwards, bus, config.source_peer_ip);
        tokio::spawn(async move { iface.poll_loop(device).await });
    }

    if config
        .port_forwards
        .iter()
        .any(|pf| pf.protocol == PortProtocol::Udp)
    {
        // UDP device
        let bus = bus.clone();
        let device =
            VirtualIpDevice::new(PortProtocol::Udp, bus.clone(), config.max_transmission_unit);
        // Start UDP Virtual Interface
        let port_forwards = config.port_forwards.clone();
        let iface = UdpVirtualInterface::new(port_forwards, bus, config.source_peer_ip);
        tokio::spawn(async move { iface.poll_loop(device).await });
    }

    {
        // Spawn one independent forwarding task per port-forward config; each
        // task gets its own clones of the shared handles.
        let port_forwards = config.port_forwards;
        let source_peer_ip = config.source_peer_ip;

        port_forwards
            .into_iter()
            .map(|pf| {
                (
                    pf,
                    wg.clone(),
                    tcp_port_pool.clone(),
                    udp_port_pool.clone(),
                    bus.clone(),
                )
            })
            .for_each(move |(pf, wg, tcp_port_pool, udp_port_pool, bus)| {
                tokio::spawn(async move {
                    tunnel::port_forward(pf, source_peer_ip, tcp_port_pool, udp_port_pool, wg, bus)
                        .await
                        .unwrap_or_else(|e| error!("Port-forward failed for {} : {}", pf, e))
                });
            });
    }

    Ok(())
}

View file

@ -1,36 +1,395 @@
#[cfg(feature = "bin")]
#[macro_use]
extern crate log;
#[cfg(feature = "bin")]
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use smoltcp::iface::InterfaceBuilder;
use smoltcp::socket::{SocketSet, TcpSocket, TcpSocketBuffer};
use smoltcp::wire::{IpAddress, IpCidr};
use tokio::net::{TcpListener, TcpStream};
use crate::config::Config;
use crate::port_pool::PortPool;
use crate::virtual_device::VirtualIpDevice;
use crate::wg::WireGuardTunnel;
pub mod config;
pub mod port_pool;
pub mod virtual_device;
pub mod wg;
pub const MAX_PACKET: usize = 65536;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
use anyhow::Context;
use onetun::{config::Config, events::Bus};
let config = Config::from_args().context("Configuration has errors")?;
let config = Config::from_args().with_context(|| "Failed to read config")?;
init_logger(&config)?;
let port_pool = Arc::new(PortPool::new());
for warning in &config.warnings {
warn!("{}", warning);
let wg = WireGuardTunnel::new(&config, port_pool.clone())
.await
.with_context(|| "Failed to initialize WireGuard tunnel")?;
let wg = Arc::new(wg);
{
// Start routine task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.routine_task().await });
}
let bus = Bus::default();
onetun::start_tunnels(config, bus).await?;
{
// Start consumption task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.consume_task().await });
}
futures::future::pending().await
{
// Start IP broadcast drain task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.broadcast_drain_task().await });
}
info!(
"Tunnelling [{}]->[{}] (via [{}] as peer {})",
&config.source_addr, &config.dest_addr, &config.endpoint_addr, &config.source_peer_ip
);
tcp_proxy_server(
config.source_addr,
config.source_peer_ip,
config.dest_addr,
port_pool.clone(),
wg,
)
.await
}
#[cfg(not(feature = "bin"))]
fn main() -> anyhow::Result<()> {
Err(anyhow::anyhow!("Binary compiled without 'bin' feature"))
/// Starts the server that listens on TCP connections.
///
/// Runs forever: accepts each incoming connection on `listen_addr`, reserves a
/// virtual port from `port_pool` for it, and spawns a handler task that
/// forwards the connection to `dest_addr` through the WireGuard tunnel,
/// releasing the virtual port when the connection ends.
async fn tcp_proxy_server(
    listen_addr: SocketAddr,
    source_peer_ip: IpAddr,
    dest_addr: SocketAddr,
    port_pool: Arc<PortPool>,
    wg: Arc<WireGuardTunnel>,
) -> anyhow::Result<()> {
    let listener = TcpListener::bind(listen_addr)
        .await
        .with_context(|| "Failed to listen on TCP proxy server")?;
    loop {
        // Fresh clones for the handler task spawned below.
        let wg = wg.clone();
        let port_pool = port_pool.clone();
        let (socket, peer_addr) = listener
            .accept()
            .await
            .with_context(|| "Failed to accept connection on TCP proxy server")?;

        // Assign a 'virtual port': this is a unique port number used to route IP packets
        // received from the WireGuard tunnel. It is the port number that the virtual client will
        // listen on.
        let virtual_port = match port_pool.next() {
            Ok(port) => port,
            Err(e) => {
                // Pool exhausted (or otherwise unavailable): drop this connection
                // but keep the server running.
                error!(
                    "Failed to assign virtual port number for connection [{}]: {:?}",
                    peer_addr, e
                );
                continue;
            }
        };

        info!("[{}] Incoming connection from {}", virtual_port, peer_addr);

        tokio::spawn(async move {
            let port_pool = Arc::clone(&port_pool);
            let result =
                handle_tcp_proxy_connection(socket, virtual_port, source_peer_ip, dest_addr, wg)
                    .await;

            if let Err(e) = result {
                error!(
                    "[{}] Connection dropped un-gracefully: {:?}",
                    virtual_port, e
                );
            } else {
                info!("[{}] Connection closed by client", virtual_port);
            }

            // Release port when connection drops
            port_pool.release(virtual_port);
        });
    }
}
#[cfg(feature = "bin")]
fn init_logger(config: &onetun::config::Config) -> anyhow::Result<()> {
use anyhow::Context;
/// Handles a new TCP connection with its assigned virtual port.
async fn handle_tcp_proxy_connection(
socket: TcpStream,
virtual_port: u16,
source_peer_ip: IpAddr,
dest_addr: SocketAddr,
wg: Arc<WireGuardTunnel>,
) -> anyhow::Result<()> {
// Abort signal for stopping the Virtual Interface
let abort = Arc::new(AtomicBool::new(false));
let mut builder = pretty_env_logger::formatted_timed_builder();
// data_to_real_client_(tx/rx): This task reads the data from this mpsc channel to send back
// to the real client.
let (data_to_real_client_tx, mut data_to_real_client_rx) = tokio::sync::mpsc::channel(1_000);
// data_to_real_server_(tx/rx): This task sends the data received from the real client to the
// virtual interface (virtual server socket).
let (data_to_virtual_server_tx, data_to_virtual_server_rx) = tokio::sync::mpsc::channel(1_000);
// Spawn virtual interface
{
let abort = abort.clone();
tokio::spawn(async move {
virtual_tcp_interface(
virtual_port,
source_peer_ip,
dest_addr,
wg,
abort,
data_to_real_client_tx,
data_to_virtual_server_rx,
)
.await
});
}
loop {
if abort.load(Ordering::Relaxed) {
break;
}
tokio::select! {
readable_result = socket.readable() => {
match readable_result {
Ok(_) => {
let mut buffer = vec![];
match socket.try_read_buf(&mut buffer) {
Ok(size) if size > 0 => {
let data = &buffer[..size];
debug!(
"[{}] Read {} bytes of TCP data from real client",
virtual_port, size
);
if let Err(e) = data_to_virtual_server_tx.send(data.to_vec()).await {
error!(
"[{}] Failed to dispatch data to virtual interface: {:?}",
virtual_port, e
);
}
}
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
error!(
"[{}] Failed to read from client TCP socket: {:?}",
virtual_port, e
);
break;
}
_ => {
break;
}
}
}
Err(e) => {
error!("[{}] Failed to check if readable: {:?}", virtual_port, e);
break;
}
}
}
data_recv_result = data_to_real_client_rx.recv() => {
match data_recv_result {
Some(data) => match socket.try_write(&data) {
Ok(size) => {
debug!(
"[{}] Wrote {} bytes of TCP data to real client",
virtual_port, size
);
}
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
error!(
"[{}] Failed to write to client TCP socket: {:?}",
virtual_port, e
);
}
},
None => continue,
}
}
}
}
trace!("[{}] TCP socket handler task terminated", virtual_port);
abort.store(true, Ordering::Relaxed);
Ok(())
}
/// Drives the smoltcp virtual interface for a single proxied TCP connection.
///
/// Bridges the real client (via the two mpsc channels) and the remote server
/// (via IP packets exchanged with the WireGuard tunnel). Terminates when the
/// connection closes gracefully, or immediately (with a RST) when `abort` is
/// set; sets `abort` itself on exit so the socket-handler task also stops.
async fn virtual_tcp_interface(
    virtual_port: u16,
    source_peer_ip: IpAddr,
    dest_addr: SocketAddr,
    wg: Arc<WireGuardTunnel>,
    abort: Arc<AtomicBool>,
    data_to_real_client_tx: tokio::sync::mpsc::Sender<Vec<u8>>,
    mut data_to_virtual_server_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
) -> anyhow::Result<()> {
    // Create a device and interface to simulate IP packets
    // In essence:
    // * TCP packets received from the 'real' client are 'sent' to the 'virtual server' via the 'virtual client'
    // * Those TCP packets generate IP packets, which are captured from the interface and sent to the WireGuardTunnel
    // * IP packets received by the WireGuardTunnel (from the endpoint) are fed into this 'virtual interface'
    // * The interface processes those IP packets and routes them to the 'virtual client' (the rest is discarded)
    // * The TCP data read by the 'virtual client' is sent to the 'real' TCP client

    // Consumer for IP packets to send through the virtual interface
    // Initialize the interface
    let device = VirtualIpDevice::new(wg);
    let mut virtual_interface = InterfaceBuilder::new(device)
        .ip_addrs([
            // Interface handles IP packets for the sender and recipient
            IpCidr::new(IpAddress::from(source_peer_ip), 32),
            IpCidr::new(IpAddress::from(dest_addr.ip()), 32),
        ])
        .any_ip(true)
        .finalize();

    // Server socket: this is a placeholder for the interface to route new connections to.
    // TODO: Determine if we even need buffers here.
    // NOTE(review): these buffers are function-level `static mut`s, shared by
    // every concurrent invocation of this function — concurrent connections
    // would alias the same memory. Confirm whether this is safe as used.
    let server_socket: anyhow::Result<TcpSocket> = {
        static mut TCP_SERVER_RX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
        static mut TCP_SERVER_TX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
        let tcp_rx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
        let tcp_tx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
        let mut socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
        socket
            .listen((IpAddress::from(dest_addr.ip()), dest_addr.port()))
            .with_context(|| "Virtual server socket failed to listen")?;
        Ok(socket)
    };
    // NOTE(review): these statics are also named TCP_SERVER_* although they back
    // the *client* socket; each block declares its own pair (distinct statics),
    // but the naming is misleading. The error message below ("failed to listen")
    // is also wrong for a connect.
    let client_socket: anyhow::Result<TcpSocket> = {
        static mut TCP_SERVER_RX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
        static mut TCP_SERVER_TX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
        let tcp_rx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
        let tcp_tx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
        let mut socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
        socket
            .connect(
                (IpAddress::from(dest_addr.ip()), dest_addr.port()),
                (IpAddress::from(source_peer_ip), virtual_port),
            )
            .with_context(|| "Virtual server socket failed to listen")?;
        Ok(socket)
    };

    // Socket set: there are always 2 sockets: 1 virtual client and 1 virtual server.
    let mut socket_set_entries: [_; 2] = Default::default();
    let mut socket_set = SocketSet::new(&mut socket_set_entries[..]);
    let _server_handle = socket_set.add(server_socket?);
    let client_handle = socket_set.add(client_socket?);

    let mut graceful_shutdown = false;
    loop {
        let loop_start = smoltcp::time::Instant::now();
        let forceful_shutdown = abort.load(Ordering::Relaxed);

        if forceful_shutdown {
            // Un-graceful shutdown: sends a RST packet.
            trace!(
                "[{}] Forcefully shutting down virtual interface",
                virtual_port
            );
            let mut client_socket = socket_set.get::<TcpSocket>(client_handle);
            client_socket.abort();
        }

        // Let smoltcp process any pending inbound/outbound packets.
        match virtual_interface.poll(&mut socket_set, loop_start) {
            Ok(processed) if processed => {
                trace!(
                    "[{}] Virtual interface polled some packets to be processed",
                    virtual_port
                );
            }
            Err(e) => {
                error!("[{}] Virtual interface poll error: {:?}", virtual_port, e);
            }
            _ => {}
        }

        {
            let mut client_socket = socket_set.get::<TcpSocket>(client_handle);

            // Drain data the virtual client received from the remote server.
            if client_socket.can_recv() {
                match client_socket.recv(|buffer| (buffer.len(), buffer.to_vec())) {
                    Ok(data) => {
                        // Send it to the real client
                        if let Err(e) = data_to_real_client_tx.send(data).await {
                            error!("[{}] Failed to dispatch data from virtual client to real client: {:?}", virtual_port, e);
                        }
                    }
                    Err(e) => {
                        error!(
                            "[{}] Failed to read from virtual client socket: {:?}",
                            virtual_port, e
                        );
                    }
                }
            }

            // Push data from the real client toward the remote server.
            if client_socket.can_send() {
                // Check if there is anything to send
                if let Ok(data) = data_to_virtual_server_rx.try_recv() {
                    if let Err(e) = client_socket.send_slice(&data) {
                        error!(
                            "[{}] Failed to send slice via virtual client socket: {:?}",
                            virtual_port, e
                        );
                    }
                }
            }

            if !graceful_shutdown && !forceful_shutdown && !client_socket.is_active() {
                // Graceful shutdown
                client_socket.close();
                trace!(
                    "[{}] Gracefully shutting down virtual interface",
                    virtual_port
                );
                // We don't break the loop right away so that the FIN segment can be sent in the next poll.
                graceful_shutdown = true;
                continue;
            }
        }

        if graceful_shutdown || forceful_shutdown {
            break;
        }

        tokio::time::sleep(Duration::from_millis(1)).await;
    }
    trace!("[{}] Virtual interface task terminated", virtual_port);
    abort.store(true, Ordering::Relaxed);
    Ok(())
}
/// Initializes the global logger, using the filter directives from the
/// configuration's `log` string (e.g. "info", "onetun=debug").
///
/// Returns an error if a global logger has already been installed.
/// NOTE(review): this span interleaved two diff versions (duplicate
/// `try_init` expressions); reconstructed as a single valid return.
fn init_logger(config: &Config) -> anyhow::Result<()> {
    let mut builder = pretty_env_logger::formatted_builder();
    builder.parse_filters(&config.log);
    builder
        .try_init()
        .context("Failed to initialize logger")
}

View file

@ -1,113 +0,0 @@
use crate::events::Event;
use crate::Bus;
use anyhow::Context;
use smoltcp::time::Instant;
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
/// Async libpcap file writer state: wraps the capture file in a buffered writer.
struct Pcap {
    // Buffered async writer over the output .pcap file.
    writer: BufWriter<File>,
}
/// libpcap file writer
/// This is mostly taken from `smoltcp`, but rewritten to be async.
impl Pcap {
    /// Flushes the buffered writer to the underlying file.
    async fn flush(&mut self) -> anyhow::Result<()> {
        self.writer
            .flush()
            .await
            .context("Failed to flush pcap writer")
    }

    /// Writes the whole buffer to the file.
    ///
    /// Uses `write_all` so a short write cannot silently truncate a pcap record
    /// (plain `write` may write fewer bytes than requested). On success, returns
    /// `data.len()` to keep the original `usize` return type for callers.
    async fn write(&mut self, data: &[u8]) -> anyhow::Result<usize> {
        self.writer
            .write_all(data)
            .await
            .with_context(|| format!("Failed to write {} bytes to pcap writer", data.len()))?;
        Ok(data.len())
    }

    /// Writes a single big-endian u16 field.
    async fn write_u16(&mut self, value: u16) -> anyhow::Result<()> {
        self.writer
            .write_u16(value)
            .await
            .context("Failed to write u16 to pcap writer")
    }

    /// Writes a single big-endian u32 field.
    async fn write_u32(&mut self, value: u32) -> anyhow::Result<()> {
        self.writer
            .write_u32(value)
            .await
            .context("Failed to write u32 to pcap writer")
    }

    /// Writes the one-time libpcap global header and flushes it to disk.
    async fn global_header(&mut self) -> anyhow::Result<()> {
        self.write_u32(0xa1b2c3d4).await?; // magic number
        self.write_u16(2).await?; // major version
        self.write_u16(4).await?; // minor version
        self.write_u32(0).await?; // timezone (= UTC)
        self.write_u32(0).await?; // accuracy (not used)
        self.write_u32(65535).await?; // maximum packet length
        self.write_u32(101).await?; // link-layer header type (101 = IP)
        self.flush().await
    }

    /// Writes the per-record header preceding each captured packet.
    async fn packet_header(&mut self, timestamp: Instant, length: usize) -> anyhow::Result<()> {
        assert!(length <= 65535);
        self.write_u32(timestamp.secs() as u32).await?; // timestamp seconds
        self.write_u32(timestamp.micros() as u32).await?; // timestamp microseconds
        self.write_u32(length as u32).await?; // captured length
        self.write_u32(length as u32).await?; // original length
        Ok(())
    }

    /// Writes one captured packet (record header + payload) and flushes once.
    /// The original flushed twice (inner `writer.flush()` then `self.flush()`);
    /// a single flush is sufficient.
    async fn packet(&mut self, timestamp: Instant, packet: &[u8]) -> anyhow::Result<()> {
        self.packet_header(timestamp, packet.len())
            .await
            .context("Failed to write packet header to pcap writer")?;
        self.write(packet)
            .await
            .context("Failed to write packet to pcap writer")?;
        self.flush().await
    }
}
/// Listens on the event bus for IP packets sent from and to the WireGuard tunnel.
/// Listens on the event bus for IP packets sent from and to the WireGuard tunnel.
///
/// Creates `pcap_file`, writes the libpcap global header, then appends one
/// record per inbound/outbound internet-packet event. Runs until the event
/// bus or the writer fails.
pub async fn capture(pcap_file: String, bus: Bus) -> anyhow::Result<()> {
    let mut endpoint = bus.new_endpoint();

    let file = File::create(&pcap_file)
        .await
        .context("Failed to create pcap file")?;
    let mut writer = Pcap {
        writer: BufWriter::new(file),
    };
    writer
        .global_header()
        .await
        .context("Failed to write global header to pcap writer")?;

    info!("Capturing WireGuard IP packets to {}", &pcap_file);
    loop {
        let event = endpoint.recv().await;
        match event {
            Event::InboundInternetPacket(_proto, ip) => {
                writer
                    .packet(Instant::now(), &ip)
                    .await
                    .context("Failed to write inbound IP packet to pcap writer")?;
            }
            Event::OutboundInternetPacket(ip) => {
                writer
                    .packet(Instant::now(), &ip)
                    .await
                    .context("Failed to write output IP packet to pcap writer")?;
            }
            _ => {}
        }
    }
}

58
src/port_pool.rs Normal file
View file

@ -0,0 +1,58 @@
use std::ops::Range;
use anyhow::Context;
// Lowest virtual port handed out (start of the common Linux ephemeral range).
const MIN_PORT: u16 = 32768;
// Highest virtual port (end of the common Linux ephemeral range).
const MAX_PORT: u16 = 60999;
// Candidate ports for the pool.
// NOTE(review): `..` is half-open, so MAX_PORT (60999) itself is never
// allocated — confirm whether an inclusive range (`..=`) was intended.
const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
/// A pool of virtual ports available.
/// This structure is thread-safe and lock-free; you can use it safely in an `Arc`.
pub struct PortPool {
    /// Remaining ports (free ports waiting to be handed out by `next()`).
    inner: lockfree::queue::Queue<u16>,
    /// Ports in use (membership checked by `is_in_use()`).
    taken: lockfree::set::Set<u16>,
}
impl Default for PortPool {
    /// Equivalent to [`PortPool::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl PortPool {
    /// Initializes a new pool containing every port in `PORT_RANGE`.
    pub fn new() -> Self {
        let inner = lockfree::queue::Queue::default();
        for port in PORT_RANGE {
            inner.push(port);
        }
        Self {
            inner,
            taken: lockfree::set::Set::new(),
        }
    }

    /// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
    pub fn next(&self) -> anyhow::Result<u16> {
        let port = self.inner.pop().context("Virtual port pool is exhausted")?;
        self.taken
            .insert(port)
            .ok()
            .context("Failed to insert taken")?;
        Ok(port)
    }

    /// Releases a port back into the pool.
    pub fn release(&self, port: u16) {
        self.inner.push(port);
        self.taken.remove(&port);
    }

    /// Whether the given port is in use by a virtual interface.
    pub fn is_in_use(&self, port: u16) -> bool {
        self.taken.contains(&port)
    }
}

View file

@ -1,34 +0,0 @@
use std::net::IpAddr;
use std::sync::Arc;
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::wg::WireGuardTunnel;
pub mod tcp;
pub mod udp;
/// Runs the proxy server for a single configured port forward, dispatching to
/// the TCP or UDP implementation based on the forward's protocol. This future
/// runs for the lifetime of the forward.
pub async fn port_forward(
    port_forward: PortForwardConfig,
    source_peer_ip: IpAddr,
    tcp_port_pool: TcpPortPool,
    udp_port_pool: UdpPortPool,
    wg: Arc<WireGuardTunnel>,
    bus: Bus,
) -> anyhow::Result<()> {
    let protocol = port_forward.protocol;
    info!(
        "Tunneling {} [{}]->[{}] (via [{}] as peer {})",
        protocol, port_forward.source, port_forward.destination, &wg.endpoint, source_peer_ip
    );

    match protocol {
        PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, bus).await,
        PortProtocol::Udp => udp::udp_proxy_server(port_forward, udp_port_pool, bus).await,
    }
}

View file

@ -1,211 +0,0 @@
use std::collections::VecDeque;
use std::ops::Range;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use bytes::BytesMut;
use rand::seq::SliceRandom;
use rand::thread_rng;
use tokio::io::AsyncWriteExt;
use tokio::net::{TcpListener, TcpStream};
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::{Bus, Event};
use crate::virtual_iface::VirtualPort;
// Maximum bytes read from the local client socket per read.
const MAX_PACKET: usize = 65536;
// Lowest virtual port assigned to a TCP connection.
const MIN_PORT: u16 = 1000;
// Highest virtual port assigned to a TCP connection.
const MAX_PORT: u16 = 60999;
// Candidate virtual ports. NOTE(review): half-open `..` excludes 60999 — confirm intent.
const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
/// Starts the server that listens on TCP connections.
///
/// Binds to `port_forward.source`, then accepts connections forever. Each
/// accepted connection is assigned a unique virtual port from `port_pool`
/// and served by a spawned `handle_tcp_proxy_connection` task. Returns an
/// error only if binding or accepting fails.
pub async fn tcp_proxy_server(
    port_forward: PortForwardConfig,
    port_pool: TcpPortPool,
    bus: Bus,
) -> anyhow::Result<()> {
    let listener = TcpListener::bind(port_forward.source)
        .await
        .context("Failed to listen on TCP proxy server")?;

    loop {
        // Clone so the spawned task below can own its own handle to the pool.
        let port_pool = port_pool.clone();
        let (socket, peer_addr) = listener
            .accept()
            .await
            .context("Failed to accept connection on TCP proxy server")?;

        // Assign a 'virtual port': this is a unique port number used to route IP packets
        // received from the WireGuard tunnel. It is the port number that the virtual client will
        // listen on.
        let virtual_port = match port_pool.next().await {
            Ok(port) => port,
            Err(e) => {
                // Pool exhausted: drop this connection but keep serving others.
                error!(
                    "Failed to assign virtual port number for connection [{}]: {:?}",
                    peer_addr, e
                );
                continue;
            }
        };

        info!("[{}] Incoming connection from {}", virtual_port, peer_addr);

        let bus = bus.clone();
        tokio::spawn(async move {
            let port_pool = port_pool.clone();
            let result = handle_tcp_proxy_connection(socket, virtual_port, port_forward, bus).await;

            if let Err(e) = result {
                error!(
                    "[{}] Connection dropped un-gracefully: {:?}",
                    virtual_port, e
                );
            } else {
                info!("[{}] Connection closed by client", virtual_port);
            }

            tokio::time::sleep(Duration::from_millis(100)).await; // Make sure the other tasks have time to process the event
            port_pool.release(virtual_port).await;
        });
    }
}
/// Handles a new TCP connection with its assigned virtual port.
///
/// Announces the connection on the event bus, then concurrently:
/// - reads bytes from the local client socket and publishes them as
///   `Event::LocalData`, and
/// - receives `Event::RemoteData` for this virtual port and writes it back
///   to the local client.
///
/// Exits when the socket errors/closes or a matching
/// `Event::ClientConnectionDropped` arrives; always publishes
/// `ClientConnectionDropped` on the way out so peers can clean up.
async fn handle_tcp_proxy_connection(
    mut socket: TcpStream,
    virtual_port: VirtualPort,
    port_forward: PortForwardConfig,
    bus: Bus,
) -> anyhow::Result<()> {
    let mut endpoint = bus.new_endpoint();
    endpoint.send(Event::ClientConnectionInitiated(port_forward, virtual_port));

    let mut buffer = BytesMut::with_capacity(MAX_PACKET);
    loop {
        tokio::select! {
            readable_result = socket.readable() => {
                match readable_result {
                    Ok(_) => {
                        match socket.try_read_buf(&mut buffer) {
                            Ok(size) if size > 0 => {
                                let data = Vec::from(&buffer[..size]);
                                endpoint.send(Event::LocalData(port_forward, virtual_port, data.into()));
                                // Reset buffer
                                buffer.clear();
                            }
                            Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                                // readable() raced with another reader; retry.
                                continue;
                            }
                            Err(e) => {
                                error!(
                                    "[{}] Failed to read from client TCP socket: {:?}",
                                    virtual_port, e
                                );
                                break;
                            }
                            // Ok(0): the client closed its end of the connection.
                            _ => {
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        error!("[{}] Failed to check if readable: {:?}", virtual_port, e);
                        break;
                    }
                }
            }
            event = endpoint.recv() => {
                match event {
                    Event::ClientConnectionDropped(e_vp) if e_vp == virtual_port => {
                        // This connection is supposed to be closed, stop the task.
                        break;
                    }
                    Event::RemoteData(e_vp, data) if e_vp == virtual_port => {
                        // Have remote data to send to the local client
                        if let Err(e) = socket.writable().await {
                            error!("[{}] Failed to check if writable: {:?}", virtual_port, e);
                        }
                        let expected = data.len();
                        let mut sent = 0;
                        // Keep writing until the whole payload has been sent;
                        // `write` may accept only part of the slice per call.
                        loop {
                            if sent >= expected {
                                break;
                            }
                            match socket.write(&data[sent..expected]).await {
                                Ok(written) => {
                                    debug!("[{}] Sent {} (expected {}) bytes to local client", virtual_port, written, expected);
                                    sent += written;
                                    if sent < expected {
                                        debug!("[{}] Will try to resend remaining {} bytes to local client", virtual_port, (expected - written));
                                    }
                                },
                                Err(e) => {
                                    error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, expected, e);
                                    break;
                                }
                            }
                        }
                    }
                    _ => {}
                }
            }
        }
    }

    // Notify other endpoints that this task has closed and no more data is to be sent to the local client
    endpoint.send(Event::ClientConnectionDropped(virtual_port));

    Ok(())
}
/// A pool of virtual ports available for TCP connections.
#[derive(Clone)]
pub struct TcpPortPool {
    // Shared, async-guarded pool state; cloning the pool shares the same state.
    inner: Arc<tokio::sync::RwLock<TcpPortPoolInner>>,
}
impl Default for TcpPortPool {
    /// Equivalent to [`TcpPortPool::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl TcpPortPool {
    /// Initializes a new pool of virtual ports.
    ///
    /// Ports are shuffled before queuing so consecutive connections receive
    /// unpredictable virtual port numbers.
    pub fn new() -> Self {
        let mut ports: Vec<u16> = PORT_RANGE.collect();
        ports.shuffle(&mut thread_rng());

        let mut inner = TcpPortPoolInner::default();
        for port in ports {
            inner.queue.push_back(port);
        }
        Self {
            inner: Arc::new(tokio::sync::RwLock::new(inner)),
        }
    }

    /// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
    pub async fn next(&self) -> anyhow::Result<VirtualPort> {
        let port = {
            let mut inner = self.inner.write().await;
            inner
                .queue
                .pop_front()
                .context("TCP virtual port pool is exhausted")?
        };
        Ok(VirtualPort::new(port, PortProtocol::Tcp))
    }

    /// Releases a port back into the pool.
    pub async fn release(&self, port: VirtualPort) {
        self.inner.write().await.queue.push_back(port.num());
    }
}
/// Non thread-safe inner logic for TCP port pool.
/// Always accessed through the `RwLock` in [`TcpPortPool`].
#[derive(Debug, Default)]
struct TcpPortPoolInner {
    /// Remaining ports in the pool.
    queue: VecDeque<u16>,
}

View file

@ -1,257 +0,0 @@
use std::collections::{HashMap, VecDeque};
use std::net::{IpAddr, SocketAddr};
use std::ops::Range;
use std::sync::Arc;
use std::time::Instant;
use anyhow::Context;
use bytes::Bytes;
use priority_queue::double_priority_queue::DoublePriorityQueue;
use rand::seq::SliceRandom;
use rand::thread_rng;
use tokio::net::UdpSocket;
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::{Bus, Event};
use crate::virtual_iface::VirtualPort;
// Maximum UDP datagram payload read per receive.
const MAX_PACKET: usize = 65536;
// Lowest virtual port assigned to a UDP peer.
const MIN_PORT: u16 = 1000;
// Highest virtual port assigned to a UDP peer.
const MAX_PORT: u16 = 60999;
// Candidate virtual ports. NOTE(review): half-open `..` excludes 60999 — confirm intent.
const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;

/// How long to keep the UDP peer address assigned to its virtual specified port, in seconds.
/// TODO: Make this configurable by the CLI
const UDP_TIMEOUT_SECONDS: u64 = 60;

/// To prevent port-flooding, we set a limit on the amount of open ports per IP address.
/// TODO: Make this configurable by the CLI
const PORTS_PER_IP: usize = 100;
/// Starts the server that listens on UDP datagrams.
///
/// Binds to `port_forward.source`, then concurrently:
/// - receives datagrams from local clients, assigns/reuses a virtual port per
///   peer address, and publishes them as `Event::LocalData`, and
/// - receives `Event::RemoteData` for known virtual ports and sends the
///   payload back to the corresponding peer address.
pub async fn udp_proxy_server(
    port_forward: PortForwardConfig,
    port_pool: UdpPortPool,
    bus: Bus,
) -> anyhow::Result<()> {
    let mut endpoint = bus.new_endpoint();
    let socket = UdpSocket::bind(port_forward.source)
        .await
        .context("Failed to bind on UDP proxy address")?;

    // Reused receive buffer; one datagram at a time.
    let mut buffer = [0u8; MAX_PACKET];
    loop {
        tokio::select! {
            to_send_result = next_udp_datagram(&socket, &mut buffer, port_pool.clone()) => {
                match to_send_result {
                    Ok(Some((port, data))) => {
                        endpoint.send(Event::LocalData(port_forward, port, data));
                    }
                    Ok(None) => {
                        // Datagram dropped (e.g. no virtual port available); keep serving.
                        continue;
                    }
                    Err(e) => {
                        error!(
                            "Failed to read from client UDP socket: {:?}",
                            e
                        );
                        break;
                    }
                }
            }
            event = endpoint.recv() => {
                if let Event::RemoteData(virtual_port, data) = event {
                    // Only forward if we still know which peer owns this virtual port.
                    if let Some(peer) = port_pool.get_peer_addr(virtual_port).await {
                        // Have remote data to send to the local client
                        if let Err(e) = socket.writable().await {
                            error!("[{}] Failed to check if writable: {:?}", virtual_port, e);
                        }
                        let expected = data.len();
                        let mut sent = 0;
                        // Retry until the full payload was handed to the socket.
                        loop {
                            if sent >= expected {
                                break;
                            }
                            match socket.send_to(&data[sent..expected], peer).await {
                                Ok(written) => {
                                    debug!("[{}] Sent {} (expected {}) bytes to local client", virtual_port, written, expected);
                                    sent += written;
                                    if sent < expected {
                                        debug!("[{}] Will try to resend remaining {} bytes to local client", virtual_port, (expected - written));
                                    }
                                },
                                Err(e) => {
                                    error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, expected, e);
                                    break;
                                }
                            }
                        }
                        // Refresh the LRU timestamp so this port isn't reclaimed.
                        port_pool.update_last_transmit(virtual_port).await;
                    }
                }
            }
        }
    }
    Ok(())
}
async fn next_udp_datagram(
socket: &UdpSocket,
buffer: &mut [u8],
port_pool: UdpPortPool,
) -> anyhow::Result<Option<(VirtualPort, Bytes)>> {
let (size, peer_addr) = socket
.recv_from(buffer)
.await
.context("Failed to accept incoming UDP datagram")?;
// Assign a 'virtual port': this is a unique port number used to route IP packets
// received from the WireGuard tunnel. It is the port number that the virtual client will
// listen on.
let port = match port_pool.next(peer_addr).await {
Ok(port) => port,
Err(e) => {
error!(
"Failed to assign virtual port number for UDP datagram from [{}]: {:?}",
peer_addr, e
);
return Ok(None);
}
};
debug!(
"[{}] Received datagram of {} bytes from {}",
port, size, peer_addr
);
port_pool.update_last_transmit(port).await;
let data = buffer[..size].to_vec();
Ok(Some((port, data.into())))
}
/// A pool of virtual ports available for UDP peers.
/// (Original comment said "TCP" — copy-paste from the TCP pool; this type is UDP-only.)
#[derive(Clone)]
pub struct UdpPortPool {
    // Shared, async-guarded pool state; cloning the pool shares the same state.
    inner: Arc<tokio::sync::RwLock<UdpPortPoolInner>>,
}
impl Default for UdpPortPool {
    /// Equivalent to [`UdpPortPool::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl UdpPortPool {
    /// Initializes a new pool of virtual ports, shuffled so peers receive
    /// unpredictable port numbers.
    pub fn new() -> Self {
        let mut inner = UdpPortPoolInner::default();
        let mut ports: Vec<u16> = PORT_RANGE.collect();
        ports.shuffle(&mut thread_rng());
        ports
            .into_iter()
            .for_each(|p| inner.queue.push_back(p) as ());
        Self {
            inner: Arc::new(tokio::sync::RwLock::new(inner)),
        }
    }

    /// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
    ///
    /// Resolution order: (1) the port already mapped to `peer_addr`;
    /// (2) if the peer holds >= PORTS_PER_IP ports, its own least-recently-used
    /// port is reclaimed; (3) a fresh port from the queue; (4) the globally
    /// least-recently-used port, if idle longer than UDP_TIMEOUT_SECONDS.
    pub async fn next(&self, peer_addr: SocketAddr) -> anyhow::Result<VirtualPort> {
        // A port found to be reused. This is outside of the block because the read lock cannot be upgraded to a write lock.
        let mut port_reuse: Option<u16> = None;
        {
            let inner = self.inner.read().await;
            if let Some(port) = inner.port_by_peer_addr.get(&peer_addr) {
                return Ok(VirtualPort::new(*port, PortProtocol::Udp));
            }
            // Count how many ports are being used by the peer IP
            let peer_ip = peer_addr.ip();
            let peer_port_count = inner
                .peer_port_usage
                .get(&peer_ip)
                .map(|v| v.len())
                .unwrap_or_default();
            if peer_port_count >= PORTS_PER_IP {
                // Return least recently used port in this IP's pool
                port_reuse = Some(
                    *(inner
                        .peer_port_usage
                        .get(&peer_ip)
                        .unwrap()
                        .peek_min()
                        .unwrap()
                        .0),
                );
                warn!(
                    "Peer [{}] is re-using active virtual port {} due to self-exhaustion.",
                    peer_addr,
                    port_reuse.unwrap()
                );
            }
        }
        let mut inner = self.inner.write().await;
        let port = port_reuse
            .or_else(|| inner.queue.pop_front())
            .or_else(|| {
                // If there is no port to reuse, and the port pool is exhausted, take the last recently used port overall,
                // as long as the last transmission exceeds the deadline
                // NOTE(review): `peek_min().unwrap()` panics if `port_usage` is empty
                // while the queue is exhausted — confirm this state is unreachable.
                let last: (&u16, &Instant) = inner.port_usage.peek_min().unwrap();
                if Instant::now().duration_since(*last.1).as_secs() > UDP_TIMEOUT_SECONDS {
                    warn!(
                        "Peer [{}] is re-using inactive virtual port {} due to global exhaustion.",
                        peer_addr, last.0
                    );
                    Some(*last.0)
                } else {
                    None
                }
            })
            .context("Virtual port pool is exhausted")?;
        // NOTE(review): when a port is reclaimed from another peer, the previous
        // peer's `port_by_peer_addr` entry is not removed here — verify staleness
        // is handled elsewhere.
        inner.port_by_peer_addr.insert(peer_addr, port);
        inner.peer_addr_by_port.insert(port, peer_addr);
        Ok(VirtualPort::new(port, PortProtocol::Udp))
    }

    /// Notify that the given virtual port has received or transmitted a UDP datagram.
    /// Refreshes both the per-peer and the global LRU timestamps.
    pub async fn update_last_transmit(&self, port: VirtualPort) {
        let mut inner = self.inner.write().await;
        if let Some(peer) = inner.peer_addr_by_port.get(&port.num()).copied() {
            let pq: &mut DoublePriorityQueue<u16, Instant> = inner
                .peer_port_usage
                .entry(peer.ip())
                .or_insert_with(Default::default);
            pq.push(port.num(), Instant::now());
        }
        let pq: &mut DoublePriorityQueue<u16, Instant> = &mut inner.port_usage;
        pq.push(port.num(), Instant::now());
    }

    /// Returns the real peer address currently mapped to the given virtual port, if any.
    pub async fn get_peer_addr(&self, port: VirtualPort) -> Option<SocketAddr> {
        let inner = self.inner.read().await;
        inner.peer_addr_by_port.get(&port.num()).copied()
    }
}
/// Non thread-safe inner logic for UDP port pool.
/// Always accessed through the `RwLock` in [`UdpPortPool`].
#[derive(Debug, Default)]
struct UdpPortPoolInner {
    /// Remaining ports in the pool.
    queue: VecDeque<u16>,
    /// The port assigned by peer IP/port. This is used to lookup an existing virtual port
    /// for an incoming UDP datagram.
    port_by_peer_addr: HashMap<SocketAddr, u16>,
    /// The socket address assigned to a peer IP/port. This is used to send a UDP datagram to
    /// the real peer address, given the virtual port.
    peer_addr_by_port: HashMap<u16, SocketAddr>,
    /// Keeps an ordered map of the most recently used virtual ports by a peer (client) IP.
    peer_port_usage: HashMap<IpAddr, DoublePriorityQueue<u16, Instant>>,
    /// Keeps an ordered map of the most recently used virtual ports in general.
    port_usage: DoublePriorityQueue<u16, Instant>,
}

View file

@ -1,136 +1,94 @@
use crate::config::PortProtocol;
use crate::events::{BusSender, Event};
use crate::Bus;
use bytes::{BufMut, Bytes, BytesMut};
use smoltcp::{
phy::{DeviceCapabilities, Medium},
time::Instant,
};
use std::{
collections::VecDeque,
sync::{Arc, Mutex},
};
use crate::wg::WireGuardTunnel;
use smoltcp::phy::{Device, DeviceCapabilities, Medium};
use smoltcp::time::Instant;
use std::sync::Arc;
/// A virtual device that processes IP packets through smoltcp and WireGuard.
/// A virtual device that processes IP packets. IP packets received from the WireGuard endpoint
/// are made available to this device using a broadcast channel receiver. IP packets sent from this device
/// are asynchronously sent out to the WireGuard tunnel.
pub struct VirtualIpDevice {
/// Max transmission unit (bytes)
max_transmission_unit: usize,
/// Channel receiver for received IP packets.
bus_sender: BusSender,
/// Local queue for packets received from the bus that need to go through the smoltcp interface.
process_queue: Arc<Mutex<VecDeque<Bytes>>>,
/// Tunnel to send IP packets to.
wg: Arc<WireGuardTunnel>,
/// Broadcast channel receiver for received IP packets.
ip_broadcast_rx: tokio::sync::broadcast::Receiver<Vec<u8>>,
}
impl VirtualIpDevice {
/// Initializes a new virtual IP device.
pub fn new(protocol: PortProtocol, bus: Bus, max_transmission_unit: usize) -> Self {
let mut bus_endpoint = bus.new_endpoint();
let bus_sender = bus_endpoint.sender();
let process_queue = Arc::new(Mutex::new(VecDeque::new()));
{
let process_queue = process_queue.clone();
tokio::spawn(async move {
loop {
match bus_endpoint.recv().await {
Event::InboundInternetPacket(ip_proto, data) if ip_proto == protocol => {
let mut queue = process_queue
.lock()
.expect("Failed to acquire process queue lock");
queue.push_back(data);
bus_endpoint.send(Event::VirtualDeviceFed(ip_proto));
}
_ => {}
}
}
});
}
pub fn new(wg: Arc<WireGuardTunnel>) -> Self {
let ip_broadcast_rx = wg.subscribe();
Self {
bus_sender,
process_queue,
max_transmission_unit,
wg,
ip_broadcast_rx,
}
}
}
impl smoltcp::phy::Device for VirtualIpDevice {
type RxToken<'a>
= RxToken
where
Self: 'a;
type TxToken<'a>
= TxToken
where
Self: 'a;
impl<'a> Device<'a> for VirtualIpDevice {
type RxToken = RxToken;
type TxToken = TxToken;
fn receive(&mut self, _timestamp: Instant) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
let next = {
let mut queue = self
.process_queue
.lock()
.expect("Failed to acquire process queue lock");
queue.pop_front()
};
match next {
Some(buffer) => Some((
Self::RxToken {
buffer: {
let mut buf = BytesMut::new();
buf.put(buffer);
buf
},
},
fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
match self.ip_broadcast_rx.try_recv() {
Ok(buffer) => Some((
Self::RxToken { buffer },
Self::TxToken {
sender: self.bus_sender.clone(),
wg: self.wg.clone(),
},
)),
None => None,
Err(_) => None,
}
}
fn transmit(&mut self, _timestamp: Instant) -> Option<Self::TxToken<'_>> {
fn transmit(&'a mut self) -> Option<Self::TxToken> {
Some(TxToken {
sender: self.bus_sender.clone(),
wg: self.wg.clone(),
})
}
fn capabilities(&self) -> DeviceCapabilities {
let mut cap = DeviceCapabilities::default();
cap.medium = Medium::Ip;
cap.max_transmission_unit = self.max_transmission_unit;
cap.max_transmission_unit = 65535;
cap
}
}
#[doc(hidden)]
pub struct RxToken {
buffer: BytesMut,
buffer: Vec<u8>,
}
impl smoltcp::phy::RxToken for RxToken {
fn consume<R, F>(self, f: F) -> R
fn consume<R, F>(mut self, _timestamp: Instant, f: F) -> smoltcp::Result<R>
where
F: FnOnce(&[u8]) -> R,
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
{
f(&self.buffer)
f(&mut self.buffer)
}
}
#[doc(hidden)]
pub struct TxToken {
sender: BusSender,
wg: Arc<WireGuardTunnel>,
}
impl smoltcp::phy::TxToken for TxToken {
fn consume<R, F>(self, len: usize, f: F) -> R
fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> smoltcp::Result<R>
where
F: FnOnce(&mut [u8]) -> R,
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
{
let mut buffer = vec![0; len];
let mut buffer = Vec::new();
buffer.resize(len, 0);
let result = f(&mut buffer);
self.sender
.send(Event::OutboundInternetPacket(buffer.into()));
tokio::spawn(async move {
match self.wg.send_ip_packet(&buffer).await {
Ok(_) => {}
Err(e) => {
error!("Failed to send IP packet to WireGuard endpoint: {:?}", e);
}
}
});
result
}
}

View file

@ -1,65 +0,0 @@
pub mod tcp;
pub mod udp;
use crate::config::PortProtocol;
use crate::VirtualIpDevice;
use async_trait::async_trait;
use std::fmt::{Display, Formatter};
#[async_trait]
pub trait VirtualInterfacePoll {
    /// Initializes the virtual interface and processes incoming data to be dispatched
    /// to the WireGuard tunnel and to the real client.
    /// Consumes `self`; implementations run until an unrecoverable error occurs.
    async fn poll_loop(mut self, device: VirtualIpDevice) -> anyhow::Result<()>;
}
/// Virtual port.
/// Pairs a port number with its protocol (TCP/UDP) so the two namespaces never collide.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct VirtualPort(u16, PortProtocol);
impl VirtualPort {
/// Create a new `VirtualPort` instance, with the given port number and associated protocol.
pub fn new(port: u16, proto: PortProtocol) -> Self {
VirtualPort(port, proto)
}
/// The port number
pub fn num(&self) -> u16 {
self.0
}
/// The protocol of this port.
pub fn proto(&self) -> PortProtocol {
self.1
}
}
// Conversions to the raw port number, for owned and borrowed values.
impl From<VirtualPort> for u16 {
    fn from(port: VirtualPort) -> Self {
        port.num()
    }
}

impl From<&VirtualPort> for u16 {
    fn from(port: &VirtualPort) -> Self {
        port.num()
    }
}

// Conversions to the protocol, for owned and borrowed values.
impl From<VirtualPort> for PortProtocol {
    fn from(port: VirtualPort) -> Self {
        port.proto()
    }
}

impl From<&VirtualPort> for PortProtocol {
    fn from(port: &VirtualPort) -> Self {
        port.proto()
    }
}
impl Display for VirtualPort {
    /// Formats as `[port:protocol]`, the tag used throughout the log output.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "[{}:{}]", self.num(), self.proto())
    }
}

View file

@ -1,257 +0,0 @@
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::Event;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::Bus;
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use smoltcp::iface::PollResult;
use smoltcp::{
iface::{Config, Interface, SocketHandle, SocketSet},
socket::tcp,
time::Instant,
wire::{HardwareAddress, IpAddress, IpCidr, IpVersion},
};
use std::{
collections::{HashMap, HashSet, VecDeque},
net::IpAddr,
time::Duration,
};
// Capacity of each virtual client socket's rx/tx buffers.
const MAX_PACKET: usize = 65536;
/// A virtual interface for proxying Layer 7 data to Layer 3 packets, and vice-versa.
pub struct TcpVirtualInterface {
    // Source IP the virtual clients connect from (the WireGuard peer IP).
    source_peer_ip: IpAddr,
    // TCP-only subset of the configured port forwards.
    port_forwards: Vec<PortForwardConfig>,
    // Event bus for exchanging data/control events with the proxy tasks.
    bus: Bus,
    // smoltcp socket set holding the server sockets and per-connection client sockets.
    sockets: SocketSet<'static>,
}
impl TcpVirtualInterface {
    /// Initialize the parameters for a new virtual interface.
    /// Use the `poll_loop()` future to start the virtual interface poll loop.
    /// Only TCP port forwards are retained from `port_forwards`.
    pub fn new(port_forwards: Vec<PortForwardConfig>, bus: Bus, source_peer_ip: IpAddr) -> Self {
        Self {
            port_forwards: port_forwards
                .into_iter()
                .filter(|f| matches!(f.protocol, PortProtocol::Tcp))
                .collect(),
            source_peer_ip,
            bus,
            sockets: SocketSet::new([]),
        }
    }

    /// Creates the listening socket for one port forward.
    ///
    /// The server socket never carries payload itself (a dedicated client socket
    /// is created per connection), so its buffers are zero-sized. Empty owned
    /// `Vec` buffers replace the previous `static mut` arrays + `unsafe` refs:
    /// taking references to a `static mut` creates aliasable mutable state and
    /// is rejected by default in newer Rust editions.
    fn new_server_socket(port_forward: PortForwardConfig) -> anyhow::Result<tcp::Socket<'static>> {
        let tcp_rx_buffer = tcp::SocketBuffer::new(Vec::new());
        let tcp_tx_buffer = tcp::SocketBuffer::new(Vec::new());
        let mut socket = tcp::Socket::new(tcp_rx_buffer, tcp_tx_buffer);
        socket
            .listen((
                IpAddress::from(port_forward.destination.ip()),
                port_forward.destination.port(),
            ))
            .context("Virtual server socket failed to listen")?;
        Ok(socket)
    }

    /// Creates a data-carrying socket for one proxied connection, with
    /// `MAX_PACKET`-sized rx/tx buffers. Infallible in practice; the `Result`
    /// is kept for interface stability.
    fn new_client_socket() -> anyhow::Result<tcp::Socket<'static>> {
        let rx_data = vec![0u8; MAX_PACKET];
        let tx_data = vec![0u8; MAX_PACKET];
        let tcp_rx_buffer = tcp::SocketBuffer::new(rx_data);
        let tcp_tx_buffer = tcp::SocketBuffer::new(tx_data);
        let socket = tcp::Socket::new(tcp_rx_buffer, tcp_tx_buffer);
        Ok(socket)
    }

    /// Deduplicated host CIDRs for the interface: the source peer IP plus every
    /// port-forward destination IP, each as a /32 (IPv4) or /128 (IPv6).
    fn addresses(&self) -> Vec<IpCidr> {
        let mut addresses = HashSet::new();
        addresses.insert(IpAddress::from(self.source_peer_ip));
        for config in self.port_forwards.iter() {
            addresses.insert(IpAddress::from(config.destination.ip()));
        }
        addresses
            .into_iter()
            .map(|addr| IpCidr::new(addr, addr_length(&addr)))
            .collect()
    }
}
#[async_trait]
impl VirtualInterfacePoll for TcpVirtualInterface {
    /// Drives the smoltcp state machine: polls the interface on its own
    /// schedule, shuttles bytes between virtual client sockets and the event
    /// bus, and reacts to connection lifecycle events. Runs until an
    /// unrecoverable error occurs.
    async fn poll_loop(mut self, mut device: VirtualIpDevice) -> anyhow::Result<()> {
        // Create CIDR block for source peer IP + each port forward IP
        let addresses = self.addresses();
        let config = Config::new(HardwareAddress::Ip);

        // Create virtual interface (contains smoltcp state machine)
        let mut iface = Interface::new(config, &mut device, Instant::now());
        iface.update_ip_addrs(|ip_addrs| {
            addresses.into_iter().for_each(|addr| {
                ip_addrs
                    .push(addr)
                    .expect("maximum number of IPs in TCP interface reached");
            });
        });

        // Create virtual server for each port forward
        for port_forward in self.port_forwards.iter() {
            let server_socket = TcpVirtualInterface::new_server_socket(*port_forward)?;
            self.sockets.add(server_socket);
        }

        // The next time to poll the interface. Can be None for instant poll.
        let mut next_poll: Option<tokio::time::Instant> = None;

        // Bus endpoint to read events
        let mut endpoint = self.bus.new_endpoint();

        // Maps virtual port to its client socket handle
        let mut port_client_handle_map: HashMap<VirtualPort, SocketHandle> = HashMap::new();

        // Data packets to send from a virtual client
        let mut send_queue: HashMap<VirtualPort, VecDeque<Bytes>> = HashMap::new();

        loop {
            tokio::select! {
                // Poll arm: sleep until the scheduled poll time. With no
                // connections and no scheduled poll, sleep "forever" until an
                // event wakes the other arm.
                _ = match (next_poll, port_client_handle_map.len()) {
                    (None, 0) => tokio::time::sleep(Duration::MAX),
                    (None, _) => tokio::time::sleep(Duration::ZERO),
                    (Some(until), _) => tokio::time::sleep_until(until),
                } => {
                    let loop_start = smoltcp::time::Instant::now();

                    // Find closed sockets
                    port_client_handle_map.retain(|virtual_port, client_handle| {
                        let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
                        if client_socket.state() == tcp::State::Closed {
                            endpoint.send(Event::ClientConnectionDropped(*virtual_port));
                            send_queue.remove(virtual_port);
                            self.sockets.remove(*client_handle);
                            false
                        } else {
                            // Not closed, retain
                            true
                        }
                    });

                    if iface.poll(loop_start, &mut device, &mut self.sockets) == PollResult::SocketStateChanged {
                        log::trace!("TCP virtual interface polled some packets to be processed");
                    }

                    for (virtual_port, client_handle) in port_client_handle_map.iter() {
                        let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
                        if client_socket.can_send() {
                            if let Some(send_queue) = send_queue.get_mut(virtual_port) {
                                let to_transfer = send_queue.pop_front();
                                if let Some(to_transfer_slice) = to_transfer.as_deref() {
                                    let total = to_transfer_slice.len();
                                    match client_socket.send_slice(to_transfer_slice) {
                                        Ok(sent) => {
                                            if sent < total {
                                                // Sometimes only a subset is sent, so the rest needs to be sent on the next poll
                                                let tx_extra = Vec::from(&to_transfer_slice[sent..total]);
                                                send_queue.push_front(tx_extra.into());
                                            }
                                        }
                                        Err(e) => {
                                            error!(
                                                "Failed to send slice via virtual client socket: {:?}", e
                                            );
                                        }
                                    }
                                } else if client_socket.state() == tcp::State::CloseWait {
                                    // Nothing left to send and remote closed: finish our side.
                                    client_socket.close();
                                }
                            }
                        }
                        if client_socket.can_recv() {
                            match client_socket.recv(|buffer| (buffer.len(), Bytes::from(buffer.to_vec()))) {
                                Ok(data) => {
                                    debug!("[{}] Received {} bytes from virtual server", virtual_port, data.len());
                                    if !data.is_empty() {
                                        endpoint.send(Event::RemoteData(*virtual_port, data));
                                    }
                                }
                                Err(e) => {
                                    error!(
                                        "Failed to read from virtual client socket: {:?}", e
                                    );
                                }
                            }
                        }
                    }

                    // The virtual interface determines the next time to poll (this is to reduce unnecessary polls)
                    next_poll = match iface.poll_delay(loop_start, &self.sockets) {
                        Some(smoltcp::time::Duration::ZERO) => None,
                        Some(delay) => {
                            trace!("TCP Virtual interface delayed next poll by {}", delay);
                            Some(tokio::time::Instant::now() + Duration::from_millis(delay.total_millis()))
                        },
                        None => None,
                    };
                }
                // Event arm: react to bus traffic; setting `next_poll = None`
                // forces an immediate poll on the next loop iteration.
                event = endpoint.recv() => {
                    match event {
                        Event::ClientConnectionInitiated(port_forward, virtual_port) => {
                            let client_socket = TcpVirtualInterface::new_client_socket()?;
                            let client_handle = self.sockets.add(client_socket);

                            // Add handle to map
                            port_client_handle_map.insert(virtual_port, client_handle);
                            send_queue.insert(virtual_port, VecDeque::new());

                            let client_socket = self.sockets.get_mut::<tcp::Socket>(client_handle);
                            let context = iface.context();
                            client_socket
                                .connect(
                                    context,
                                    (
                                        IpAddress::from(port_forward.destination.ip()),
                                        port_forward.destination.port(),
                                    ),
                                    (IpAddress::from(self.source_peer_ip), virtual_port.num()),
                                )
                                // NOTE(review): this context string looks copy-pasted from the
                                // listen path — this is a client `connect`, not a server listen.
                                .context("Virtual server socket failed to listen")?;
                            next_poll = None;
                        }
                        Event::ClientConnectionDropped(virtual_port) => {
                            if let Some(client_handle) = port_client_handle_map.get(&virtual_port) {
                                let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
                                client_socket.close();
                                next_poll = None;
                            }
                        }
                        Event::LocalData(_, virtual_port, data) if send_queue.contains_key(&virtual_port) => {
                            if let Some(send_queue) = send_queue.get_mut(&virtual_port) {
                                send_queue.push_back(data);
                                next_poll = None;
                            }
                        }
                        Event::VirtualDeviceFed(PortProtocol::Tcp) => {
                            next_poll = None;
                        }
                        _ => {}
                    }
                }
            }
        }
    }
}
/// Prefix length spanning the full address width of `addr`'s IP version
/// (32 bits for IPv4, 128 bits for IPv6), used to build a host CIDR.
const fn addr_length(addr: &IpAddress) -> u8 {
    if matches!(addr.version(), IpVersion::Ipv4) {
        32
    } else {
        128
    }
}

View file

@ -1,227 +0,0 @@
use crate::config::PortForwardConfig;
use crate::events::Event;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::{Bus, PortProtocol};
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use smoltcp::iface::PollResult;
use smoltcp::{
iface::{Config, Interface, SocketHandle, SocketSet},
socket::udp::{self, UdpMetadata},
time::Instant,
wire::{HardwareAddress, IpAddress, IpCidr, IpVersion},
};
use std::{
collections::{HashMap, HashSet, VecDeque},
net::IpAddr,
time::Duration,
};
const MAX_PACKET: usize = 65536;
/// A virtual interface that handles UDP port forwards via a smoltcp state machine.
pub struct UdpVirtualInterface {
    /// IP assigned to the WireGuard peer; client sockets bind to this address.
    source_peer_ip: IpAddr,
    /// Port-forward configurations; filtered to UDP-only in `new()`.
    port_forwards: Vec<PortForwardConfig>,
    /// Event bus used to exchange data and lifecycle events with the rest of the process.
    bus: Bus,
    /// smoltcp socket set holding the virtual server and client sockets.
    sockets: SocketSet<'static>,
}
impl UdpVirtualInterface {
    /// Initialize the parameters for a new virtual interface.
    /// Use the `poll_loop()` future to start the virtual interface poll loop.
    pub fn new(port_forwards: Vec<PortForwardConfig>, bus: Bus, source_peer_ip: IpAddr) -> Self {
        Self {
            // Only UDP forwards are relevant to this interface; TCP is handled elsewhere.
            port_forwards: port_forwards
                .into_iter()
                .filter(|f| matches!(f.protocol, PortProtocol::Udp))
                .collect(),
            source_peer_ip,
            bus,
            sockets: SocketSet::new([]),
        }
    }

    /// Create a zero-capacity server socket bound to the port forward's destination.
    ///
    /// The server socket never queues payload data itself — it exists so that the
    /// interface accepts datagrams addressed to the bound endpoint — hence both
    /// buffers have zero capacity.
    fn new_server_socket(port_forward: PortForwardConfig) -> anyhow::Result<udp::Socket<'static>> {
        // Owned empty Vec storage instead of zero-length `static mut` arrays:
        // taking `&mut` references to shared mutable statics is unsound (and
        // denied by the `static_mut_refs` lint in newer Rust editions), while
        // empty Vecs are safe and allocation-free.
        let udp_rx_buffer =
            udp::PacketBuffer::new(Vec::<udp::PacketMetadata>::new(), Vec::<u8>::new());
        let udp_tx_buffer =
            udp::PacketBuffer::new(Vec::<udp::PacketMetadata>::new(), Vec::<u8>::new());
        let mut socket = udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
        socket
            .bind((
                IpAddress::from(port_forward.destination.ip()),
                port_forward.destination.port(),
            ))
            .context("UDP virtual server socket failed to bind")?;
        Ok(socket)
    }

    /// Create a client socket bound to `(source_peer_ip, client_port)`, used to
    /// exchange datagrams with a virtual server on behalf of a real client.
    fn new_client_socket(
        source_peer_ip: IpAddr,
        client_port: VirtualPort,
    ) -> anyhow::Result<udp::Socket<'static>> {
        // Up to 10 in-flight datagrams of up to MAX_PACKET bytes each direction.
        let rx_meta = vec![udp::PacketMetadata::EMPTY; 10];
        let tx_meta = vec![udp::PacketMetadata::EMPTY; 10];
        let rx_data = vec![0u8; MAX_PACKET];
        let tx_data = vec![0u8; MAX_PACKET];
        let udp_rx_buffer = udp::PacketBuffer::new(rx_meta, rx_data);
        let udp_tx_buffer = udp::PacketBuffer::new(tx_meta, tx_data);
        let mut socket = udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
        socket
            .bind((IpAddress::from(source_peer_ip), client_port.num()))
            .context("UDP virtual client failed to bind")?;
        Ok(socket)
    }

    /// Compute the interface's addresses: the source peer IP plus every
    /// port-forward destination IP, deduplicated, each as a host-width CIDR.
    fn addresses(&self) -> Vec<IpCidr> {
        let mut addresses = HashSet::new();
        addresses.insert(IpAddress::from(self.source_peer_ip));
        for config in self.port_forwards.iter() {
            addresses.insert(IpAddress::from(config.destination.ip()));
        }
        addresses
            .into_iter()
            .map(|addr| IpCidr::new(addr, addr_length(&addr)))
            .collect()
    }
}
#[async_trait]
impl VirtualInterfacePoll for UdpVirtualInterface {
    /// Runs the interface's poll loop: drives the smoltcp state machine, moves
    /// datagrams between virtual client sockets and the event bus, and lazily
    /// creates one client socket per virtual port on first local data.
    async fn poll_loop(mut self, mut device: VirtualIpDevice) -> anyhow::Result<()> {
        // Create CIDR block for source peer IP + each port forward IP
        let addresses = self.addresses();
        let config = Config::new(HardwareAddress::Ip);

        // Create virtual interface (contains smoltcp state machine)
        let mut iface = Interface::new(config, &mut device, Instant::now());
        iface.update_ip_addrs(|ip_addrs| {
            addresses.into_iter().for_each(|addr| {
                ip_addrs
                    .push(addr)
                    .expect("maximum number of IPs in UDP interface reached");
            });
        });

        // Create virtual server for each port forward
        for port_forward in self.port_forwards.iter() {
            let server_socket = UdpVirtualInterface::new_server_socket(*port_forward)?;
            self.sockets.add(server_socket);
        }

        // The next time to poll the interface. Can be None for instant poll.
        let mut next_poll: Option<tokio::time::Instant> = None;

        // Bus endpoint to read events
        let mut endpoint = self.bus.new_endpoint();

        // Maps virtual port to its client socket handle
        let mut port_client_handle_map: HashMap<VirtualPort, SocketHandle> = HashMap::new();

        // Data packets to send from a virtual client
        let mut send_queue: HashMap<VirtualPort, VecDeque<(PortForwardConfig, Bytes)>> =
            HashMap::new();

        loop {
            tokio::select! {
                // Timer arm: sleep until the next scheduled poll. With no client
                // sockets there is nothing to drive, so park indefinitely;
                // `next_poll == None` (with clients) means poll immediately.
                _ = match (next_poll, port_client_handle_map.len()) {
                    (None, 0) => tokio::time::sleep(Duration::MAX),
                    (None, _) => tokio::time::sleep(Duration::ZERO),
                    (Some(until), _) => tokio::time::sleep_until(until),
                } => {
                    let loop_start = smoltcp::time::Instant::now();

                    if iface.poll(loop_start, &mut device, &mut self.sockets) == PollResult::SocketStateChanged {
                        log::trace!("UDP virtual interface polled some packets to be processed");
                    }

                    // Per client socket: flush one queued outbound datagram and
                    // drain any inbound datagram back to the bus.
                    for (virtual_port, client_handle) in port_client_handle_map.iter() {
                        let client_socket = self.sockets.get_mut::<udp::Socket>(*client_handle);
                        if client_socket.can_send() {
                            if let Some(send_queue) = send_queue.get_mut(virtual_port) {
                                // One datagram per poll iteration; the rest stays queued.
                                let to_transfer = send_queue.pop_front();
                                if let Some((port_forward, data)) = to_transfer {
                                    client_socket
                                        .send_slice(
                                            &data,
                                            UdpMetadata::from(port_forward.destination),
                                        )
                                        .unwrap_or_else(|e| {
                                            error!(
                                                "[{}] Failed to send data to virtual server: {:?}",
                                                virtual_port, e
                                            );
                                        });
                                }
                            }
                        }
                        if client_socket.can_recv() {
                            match client_socket.recv() {
                                Ok((data, _peer)) => {
                                    // Forward the received datagram to the real client via the bus.
                                    if !data.is_empty() {
                                        endpoint.send(Event::RemoteData(*virtual_port, data.to_vec().into()));
                                    }
                                }
                                Err(e) => {
                                    error!(
                                        "Failed to read from virtual client socket: {:?}", e
                                    );
                                }
                            }
                        }
                    }

                    // The virtual interface determines the next time to poll (this is to reduce unnecessary polls)
                    next_poll = match iface.poll_delay(loop_start, &self.sockets) {
                        Some(smoltcp::time::Duration::ZERO) => None,
                        Some(delay) => {
                            trace!("UDP Virtual interface delayed next poll by {}", delay);
                            Some(tokio::time::Instant::now() + Duration::from_millis(delay.total_millis()))
                        },
                        None => None,
                    };
                }
                // Bus arm: react to events destined for this interface.
                event = endpoint.recv() => {
                    match event {
                        Event::LocalData(port_forward, virtual_port, data) => {
                            if let Some(send_queue) = send_queue.get_mut(&virtual_port) {
                                // Client socket already exists
                                send_queue.push_back((port_forward, data));
                            } else {
                                // Client socket does not exist
                                let client_socket = UdpVirtualInterface::new_client_socket(self.source_peer_ip, virtual_port)?;
                                let client_handle = self.sockets.add(client_socket);
                                // Add handle to map
                                port_client_handle_map.insert(virtual_port, client_handle);
                                send_queue.insert(virtual_port, VecDeque::from(vec![(port_forward, data)]));
                            }
                            // Poll immediately so the queued datagram goes out.
                            next_poll = None;
                        }
                        Event::VirtualDeviceFed(PortProtocol::Udp) => {
                            // The device received a UDP packet; poll immediately.
                            next_poll = None;
                        }
                        _ => {}
                    }
                }
            }
        }
    }
}
/// Prefix length spanning the full address width of `addr`'s IP version
/// (32 bits for IPv4, 128 bits for IPv6), used to build a host CIDR.
const fn addr_length(addr: &IpAddress) -> u8 {
    if matches!(addr.version(), IpVersion::Ipv4) {
        32
    } else {
        128
    }
}

413
src/wg.rs
View file

@ -1,54 +1,65 @@
use std::net::{IpAddr, SocketAddr};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use crate::Bus;
use anyhow::Context;
use async_recursion::async_recursion;
use boringtun::noise::errors::WireGuardError;
use boringtun::noise::{Tunn, TunnResult};
use futures::lock::Mutex;
use log::Level;
use smoltcp::wire::{IpProtocol, IpVersion, Ipv4Packet, Ipv6Packet};
use smoltcp::phy::ChecksumCapabilities;
use smoltcp::wire::{
IpAddress, IpProtocol, IpVersion, Ipv4Packet, Ipv4Repr, Ipv6Packet, Ipv6Repr, TcpControl,
TcpPacket, TcpRepr, TcpSeqNumber,
};
use tokio::net::UdpSocket;
use tokio::sync::Mutex;
use tokio::sync::broadcast::error::RecvError;
use crate::config::{Config, PortProtocol};
use crate::events::Event;
use crate::config::Config;
use crate::port_pool::PortPool;
use crate::MAX_PACKET;
/// The capacity of the channel for received IP packets.
pub const DISPATCH_CAPACITY: usize = 1_000;
const MAX_PACKET: usize = 65536;
/// The capacity of the broadcast channel for received IP packets.
const BROADCAST_CAPACITY: usize = 1_000;
/// A WireGuard tunnel. Encapsulates and decapsulates IP packets
/// to be sent to and received from a remote UDP endpoint.
/// This tunnel supports at most 1 peer IP at a time, but supports simultaneous ports.
pub struct WireGuardTunnel {
pub(crate) source_peer_ip: IpAddr,
source_peer_ip: IpAddr,
/// `boringtun` peer/tunnel implementation, used for crypto & WG protocol.
peer: Mutex<Box<Tunn>>,
peer: Box<Tunn>,
/// The UDP socket for the public WireGuard endpoint to connect to.
udp: UdpSocket,
/// The address of the public WireGuard endpoint (UDP).
pub(crate) endpoint: SocketAddr,
/// Event bus
bus: Bus,
endpoint: SocketAddr,
/// Broadcast sender for received IP packets.
ip_broadcast_tx: tokio::sync::broadcast::Sender<Vec<u8>>,
/// Sink so that the broadcaster doesn't close. A repeating task should drain this as much as possible.
ip_broadcast_rx_sink: Mutex<tokio::sync::broadcast::Receiver<Vec<u8>>>,
/// Port pool.
port_pool: Arc<PortPool>,
}
impl WireGuardTunnel {
/// Initialize a new WireGuard tunnel.
pub async fn new(config: &Config, bus: Bus) -> anyhow::Result<Self> {
pub async fn new(config: &Config, port_pool: Arc<PortPool>) -> anyhow::Result<Self> {
let source_peer_ip = config.source_peer_ip;
let peer = Mutex::new(Box::new(Self::create_tunnel(config)?));
let endpoint = config.endpoint_addr;
let udp = UdpSocket::bind(config.endpoint_bind_addr)
let peer = Self::create_tunnel(config)?;
let udp = UdpSocket::bind("0.0.0.0:0")
.await
.context("Failed to create UDP socket for WireGuard connection")?;
.with_context(|| "Failed to create UDP socket for WireGuard connection")?;
let endpoint = config.endpoint_addr;
let (ip_broadcast_tx, ip_broadcast_rx_sink) =
tokio::sync::broadcast::channel(BROADCAST_CAPACITY);
Ok(Self {
source_peer_ip,
peer,
udp,
endpoint,
bus,
ip_broadcast_tx,
ip_broadcast_rx_sink: Mutex::new(ip_broadcast_rx_sink),
port_pool,
})
}
@ -56,16 +67,12 @@ impl WireGuardTunnel {
pub async fn send_ip_packet(&self, packet: &[u8]) -> anyhow::Result<()> {
trace_ip_packet("Sending IP packet", packet);
let mut send_buf = [0u8; MAX_PACKET];
let encapsulate_result = {
let mut peer = self.peer.lock().await;
peer.encapsulate(packet, &mut send_buf)
};
match encapsulate_result {
match self.peer.encapsulate(packet, &mut send_buf) {
TunnResult::WriteToNetwork(packet) => {
self.udp
.send_to(packet, self.endpoint)
.await
.context("Failed to send encrypted IP packet to WireGuard endpoint.")?;
.with_context(|| "Failed to send encrypted IP packet to WireGuard endpoint.")?;
debug!(
"Sent {} bytes to WireGuard endpoint (encrypted IP packet)",
packet.len()
@ -87,20 +94,9 @@ impl WireGuardTunnel {
Ok(())
}
pub async fn produce_task(&self) -> ! {
trace!("Starting WireGuard production task");
let mut endpoint = self.bus.new_endpoint();
loop {
if let Event::OutboundInternetPacket(data) = endpoint.recv().await {
match self.send_ip_packet(&data).await {
Ok(_) => {}
Err(e) => {
error!("{:?}", e);
}
}
}
}
/// Create a new receiver for broadcasted IP packets, received from the WireGuard endpoint.
pub fn subscribe(&self) -> tokio::sync::broadcast::Receiver<Vec<u8>> {
self.ip_broadcast_tx.subscribe()
}
/// WireGuard Routine task. Handles Handshake, keep-alive, etc.
@ -109,62 +105,43 @@ impl WireGuardTunnel {
loop {
let mut send_buf = [0u8; MAX_PACKET];
let tun_result = { self.peer.lock().await.update_timers(&mut send_buf) };
self.handle_routine_tun_result(tun_result).await;
match self.peer.update_timers(&mut send_buf) {
TunnResult::WriteToNetwork(packet) => {
debug!(
"Sending routine packet of {} bytes to WireGuard endpoint",
packet.len()
);
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
Err(e) => {
error!(
"Failed to send routine packet to WireGuard endpoint: {:?}",
e
);
}
};
}
TunnResult::Err(e) => {
error!(
"Failed to prepare routine packet for WireGuard endpoint: {:?}",
e
);
}
TunnResult::Done => {
// Sleep for a bit
tokio::time::sleep(Duration::from_millis(1)).await;
}
other => {
warn!("Unexpected WireGuard routine task state: {:?}", other);
}
}
}
}
#[async_recursion]
async fn handle_routine_tun_result<'a: 'async_recursion>(&self, result: TunnResult<'a>) -> () {
match result {
TunnResult::WriteToNetwork(packet) => {
debug!(
"Sending routine packet of {} bytes to WireGuard endpoint",
packet.len()
);
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
Err(e) => {
error!(
"Failed to send routine packet to WireGuard endpoint: {:?}",
e
);
}
};
}
TunnResult::Err(WireGuardError::ConnectionExpired) => {
warn!("Wireguard handshake has expired!");
let mut buf = vec![0u8; MAX_PACKET];
let result = self
.peer
.lock()
.await
.format_handshake_initiation(&mut buf[..], false);
self.handle_routine_tun_result(result).await
}
TunnResult::Err(e) => {
error!(
"Failed to prepare routine packet for WireGuard endpoint: {:?}",
e
);
}
TunnResult::Done => {
// Sleep for a bit
tokio::time::sleep(Duration::from_millis(1)).await;
}
other => {
warn!("Unexpected WireGuard routine task state: {:?}", other);
}
};
}
/// WireGuard consumption task. Receives encrypted packets from the WireGuard endpoint,
/// decapsulates them, and dispatches newly received IP packets.
/// decapsulates them, and broadcasts newly received IP packets.
pub async fn consume_task(&self) -> ! {
trace!("Starting WireGuard consumption task");
let endpoint = self.bus.new_endpoint();
loop {
let mut recv_buf = [0u8; MAX_PACKET];
@ -181,11 +158,7 @@ impl WireGuardTunnel {
};
let data = &recv_buf[..size];
let decapsulate_result = {
let mut peer = self.peer.lock().await;
peer.decapsulate(None, data, &mut send_buf)
};
match decapsulate_result {
match self.peer.decapsulate(None, data, &mut send_buf) {
TunnResult::WriteToNetwork(packet) => {
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
@ -194,10 +167,9 @@ impl WireGuardTunnel {
continue;
}
};
let mut peer = self.peer.lock().await;
loop {
let mut send_buf = [0u8; MAX_PACKET];
match peer.decapsulate(None, &[], &mut send_buf) {
match self.peer.decapsulate(None, &[], &mut send_buf) {
TunnResult::WriteToNetwork(packet) => {
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
@ -222,8 +194,35 @@ impl WireGuardTunnel {
// For debugging purposes: parse packet
trace_ip_packet("Received IP packet", packet);
if let Some(proto) = self.route_protocol(packet) {
endpoint.send(Event::InboundInternetPacket(proto, packet.to_vec().into()));
match self.route_ip_packet(packet) {
RouteResult::Broadcast => {
// Broadcast IP packet
if self.ip_broadcast_tx.receiver_count() > 1 {
match self.ip_broadcast_tx.send(packet.to_vec()) {
Ok(n) => {
trace!(
"Broadcasted received IP packet to {} virtual interfaces",
n - 1
);
}
Err(e) => {
error!(
"Failed to broadcast received IP packet to recipients: {}",
e
);
}
}
}
}
RouteResult::TcpReset(packet) => {
trace!("Resetting dead TCP connection after packet from WireGuard endpoint");
self.send_ip_packet(&packet)
.await
.unwrap_or_else(|e| error!("Failed to sent TCP reset: {:?}", e));
}
RouteResult::Drop => {
trace!("Dropped incoming IP packet from WireGuard endpoint");
}
}
}
_ => {}
@ -231,48 +230,203 @@ impl WireGuardTunnel {
}
}
fn create_tunnel(config: &Config) -> anyhow::Result<Tunn> {
let private = config.private_key.as_ref().clone();
let public = *config.endpoint_public_key.as_ref();
/// A repeating task that drains the default IP broadcast channel receiver.
/// It is necessary to keep this receiver alive to prevent the overall channel from closing,
/// so draining its backlog regularly is required to avoid memory leaks.
pub async fn broadcast_drain_task(&self) {
trace!("Starting IP broadcast sink drain task");
loop {
let mut sink = self.ip_broadcast_rx_sink.lock().await;
match sink.recv().await {
Ok(_) => {
trace!("Drained a packet from IP broadcast sink");
}
Err(e) => match e {
RecvError::Closed => {
trace!("IP broadcast sink finished draining: channel closed");
break;
}
RecvError::Lagged(_) => {
warn!("IP broadcast sink is falling behind");
}
},
}
}
trace!("Stopped IP broadcast sink drain");
}
fn create_tunnel(config: &Config) -> anyhow::Result<Box<Tunn>> {
Tunn::new(
private,
public,
config.preshared_key,
config.private_key.clone(),
config.endpoint_public_key.clone(),
None,
config.keepalive_seconds,
0,
None,
)
.map_err(|s| anyhow::anyhow!("{}", s))
.context("Failed to initialize boringtun Tunn")
.with_context(|| "Failed to initialize boringtun Tunn")
}
/// Determine the inner protocol of the incoming IP packet (TCP/UDP).
fn route_protocol(&self, packet: &[u8]) -> Option<PortProtocol> {
/// Makes a decision on the handling of an incoming IP packet.
fn route_ip_packet(&self, packet: &[u8]) -> RouteResult {
match IpVersion::of_packet(packet) {
Ok(IpVersion::Ipv4) => Ipv4Packet::new_checked(&packet)
.ok()
// Only care if the packet is destined for this tunnel
.filter(|packet| packet.dst_addr() == self.source_peer_ip)
.and_then(|packet| match packet.next_header() {
IpProtocol::Tcp => Some(PortProtocol::Tcp),
IpProtocol::Udp => Some(PortProtocol::Udp),
// Unrecognized protocol, so we cannot determine where to route
_ => None,
}),
.filter(|packet| Ipv4Addr::from(packet.dst_addr()) == self.source_peer_ip)
.map(|packet| match packet.protocol() {
IpProtocol::Tcp => Some(self.route_tcp_segment(
IpVersion::Ipv4,
packet.src_addr().into(),
packet.dst_addr().into(),
packet.payload(),
)),
// Unrecognized protocol, so we'll allow it.
_ => Some(RouteResult::Broadcast),
})
.flatten()
.unwrap_or(RouteResult::Drop),
Ok(IpVersion::Ipv6) => Ipv6Packet::new_checked(&packet)
.ok()
// Only care if the packet is destined for this tunnel
.filter(|packet| packet.dst_addr() == self.source_peer_ip)
.and_then(|packet| match packet.next_header() {
IpProtocol::Tcp => Some(PortProtocol::Tcp),
IpProtocol::Udp => Some(PortProtocol::Udp),
// Unrecognized protocol, so we cannot determine where to route
_ => None,
}),
_ => None,
.filter(|packet| Ipv6Addr::from(packet.dst_addr()) == self.source_peer_ip)
.map(|packet| match packet.next_header() {
IpProtocol::Tcp => Some(self.route_tcp_segment(
IpVersion::Ipv6,
packet.src_addr().into(),
packet.dst_addr().into(),
packet.payload(),
)),
// Unrecognized protocol, so we'll allow it.
_ => Some(RouteResult::Broadcast),
})
.flatten()
.unwrap_or(RouteResult::Drop),
_ => RouteResult::Drop,
}
}
/// Makes a decision on the handling of an incoming TCP segment:
/// broadcast it if the destination port belongs to an active connection,
/// otherwise craft a RST reply (unless the segment is itself a RST).
fn route_tcp_segment(
    &self,
    ip_version: IpVersion,
    src_addr: IpAddress,
    dst_addr: IpAddress,
    segment: &[u8],
) -> RouteResult {
    TcpPacket::new_checked(segment)
        .ok()
        .map(|tcp| {
            if self.port_pool.is_in_use(tcp.dst_port()) {
                // An active virtual connection owns this port: let it handle the segment.
                RouteResult::Broadcast
            } else if tcp.rst() {
                // Never answer a RST with a RST (avoids reset loops).
                RouteResult::Drop
            } else {
                // Port is not in use, but it's a TCP packet so we'll craft a RST.
                RouteResult::TcpReset(craft_tcp_rst_reply(
                    ip_version,
                    src_addr,
                    tcp.src_port(),
                    dst_addr,
                    tcp.dst_port(),
                    tcp.ack_number(),
                ))
            }
        })
        // Malformed TCP segment: drop it.
        .unwrap_or(RouteResult::Drop)
}
}
/// Craft an IP packet containing a TCP RST segment, given an IP version,
/// source address (the one to reply to), destination address (the one the reply comes from),
/// and the ACK number received in the initiating TCP segment.
///
/// Returns the fully serialized IP packet (header + TCP segment) as a byte vector.
/// Panics if `ip_version` does not match the variants of the given addresses.
fn craft_tcp_rst_reply(
    ip_version: IpVersion,
    source_addr: IpAddress,
    source_port: u16,
    dest_addr: IpAddress,
    dest_port: u16,
    ack_number: TcpSeqNumber,
) -> Vec<u8> {
    // RST generation: the reply's SEQ is the peer's ACK number and no ACK is set
    // (matches RFC 793 reset handling for a segment that carried an ACK).
    let tcp_repr = TcpRepr {
        src_port: dest_port,
        dst_port: source_port,
        control: TcpControl::Rst,
        seq_number: ack_number,
        ack_number: None,
        window_len: 0,
        window_scale: None,
        max_seg_size: None,
        sack_permitted: false,
        sack_ranges: [None, None, None],
        payload: &[],
    };
    // 20 bytes: minimal TCP header, no options, no payload.
    let mut tcp_buffer = vec![0u8; 20];
    let mut tcp_packet = &mut TcpPacket::new_unchecked(&mut tcp_buffer);
    // Addresses are swapped relative to the incoming segment: the reply travels
    // from `dest` back to `source`. They are needed here for the TCP checksum.
    tcp_repr.emit(
        &mut tcp_packet,
        &dest_addr,
        &source_addr,
        &ChecksumCapabilities::default(),
    );

    let mut ip_buffer = vec![0u8; MAX_PACKET];
    let (header_len, total_len) = match ip_version {
        IpVersion::Ipv4 => {
            // Unwrap the concrete IPv4 addresses; a mismatch with `ip_version` is a caller bug.
            let dest_addr = match dest_addr {
                IpAddress::Ipv4(dest_addr) => dest_addr,
                _ => panic!(),
            };
            let source_addr = match source_addr {
                IpAddress::Ipv4(source_addr) => source_addr,
                _ => panic!(),
            };
            let mut ip_packet = &mut Ipv4Packet::new_unchecked(&mut ip_buffer);
            let ip_repr = Ipv4Repr {
                src_addr: dest_addr,
                dst_addr: source_addr,
                protocol: IpProtocol::Tcp,
                payload_len: tcp_buffer.len(),
                hop_limit: 64,
            };
            ip_repr.emit(&mut ip_packet, &ChecksumCapabilities::default());
            (
                ip_packet.header_len() as usize,
                ip_packet.total_len() as usize,
            )
        }
        IpVersion::Ipv6 => {
            // Unwrap the concrete IPv6 addresses; a mismatch with `ip_version` is a caller bug.
            let dest_addr = match dest_addr {
                IpAddress::Ipv6(dest_addr) => dest_addr,
                _ => panic!(),
            };
            let source_addr = match source_addr {
                IpAddress::Ipv6(source_addr) => source_addr,
                _ => panic!(),
            };
            let mut ip_packet = &mut Ipv6Packet::new_unchecked(&mut ip_buffer);
            let ip_repr = Ipv6Repr {
                src_addr: dest_addr,
                dst_addr: source_addr,
                next_header: IpProtocol::Tcp,
                payload_len: tcp_buffer.len(),
                hop_limit: 64,
            };
            ip_repr.emit(&mut ip_packet);
            (ip_packet.header_len(), ip_packet.total_len())
        }
        _ => panic!(),
    };

    // Splice the TCP segment right after the IP header and trim to total length.
    ip_buffer[header_len..total_len].copy_from_slice(&tcp_buffer);
    let packet: &[u8] = &ip_buffer[..total_len];
    packet.to_vec()
}
fn trace_ip_packet(message: &str, packet: &[u8]) {
@ -294,3 +448,12 @@ fn trace_ip_packet(message: &str, packet: &[u8]) {
}
}
}
/// Routing decision for an IP packet received from the WireGuard endpoint.
enum RouteResult {
    /// The packet can be broadcasted to the virtual interfaces
    Broadcast,
    /// The packet targets a TCP port not in use; carries a crafted RST reply to send back.
    TcpReset(Vec<u8>),
    /// The packet can be safely ignored.
    Drop,
}