Compare commits

..

141 commits

Author SHA1 Message Date
3ca5ae2181
Merge pull request #85 from aramperes/dependabot/cargo/async-trait-0.1.87
build(deps): bump async-trait from 0.1.83 to 0.1.87
2025-03-10 23:32:19 -04:00
ac83ddbd4d
Merge pull request #86 from aramperes/dependabot/cargo/anyhow-1.0.97
build(deps): bump anyhow from 1.0.94 to 1.0.97
2025-03-10 23:32:12 -04:00
17f424140d
Merge pull request #87 from aramperes/dependabot/cargo/tokio-1.44.0
build(deps): bump tokio from 1.42.0 to 1.44.0
2025-03-10 23:31:55 -04:00
dependabot[bot]
8030ca1a2d
build(deps): bump tokio from 1.42.0 to 1.44.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.42.0 to 1.44.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.42.0...tokio-1.44.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 16:58:22 +00:00
dependabot[bot]
7eddf3f17f
build(deps): bump anyhow from 1.0.94 to 1.0.97
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.94 to 1.0.97.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.94...1.0.97)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 16:47:17 +00:00
dependabot[bot]
bcfa43702a
build(deps): bump async-trait from 0.1.83 to 0.1.87
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.83 to 0.1.87.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.83...0.1.87)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 16:47:00 +00:00
d0fcab38c3 docs: update README and LICENSE 2025-01-25 21:45:38 -05:00
c83c9ec500
Merge pull request #67 from aramperes/dependabot/cargo/priority-queue-2.1.1 2024-12-11 19:56:22 -05:00
caadd415cd
Merge pull request #68 from aramperes/dependabot/cargo/pretty_env_logger-0.5.0 2024-12-11 19:55:18 -05:00
3a89f2877d
Merge pull request #69 from aramperes/dependabot/cargo/anyhow-1.0.94 2024-12-11 19:54:56 -05:00
341849762c
Merge pull request #70 from aramperes/dependabot/cargo/tokio-1.42.0 2024-12-11 19:54:32 -05:00
dependabot[bot]
57e6ddc74c
Bump tokio from 1.41.1 to 1.42.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.41.1 to 1.42.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.41.1...tokio-1.42.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-09 17:28:44 +00:00
dependabot[bot]
08d99b9d22
Bump anyhow from 1.0.93 to 1.0.94
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.93 to 1.0.94.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.93...1.0.94)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-09 17:28:37 +00:00
dependabot[bot]
2661a2d29f
Bump pretty_env_logger from 0.4.0 to 0.5.0
Bumps [pretty_env_logger](https://github.com/seanmonstar/pretty-env-logger) from 0.4.0 to 0.5.0.
- [Commits](https://github.com/seanmonstar/pretty-env-logger/compare/v0.4.0...v0.5.0)

---
updated-dependencies:
- dependency-name: pretty_env_logger
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 03:38:35 +00:00
dependabot[bot]
6722237902
Bump priority-queue from 1.4.0 to 2.1.1
Bumps [priority-queue](https://github.com/garro95/priority-queue) from 1.4.0 to 2.1.1.
- [Release notes](https://github.com/garro95/priority-queue/releases)
- [Commits](https://github.com/garro95/priority-queue/commits/2.1.1)

---
updated-dependencies:
- dependency-name: priority-queue
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 03:38:32 +00:00
83ef02c695
Create dependabot.yml 2024-12-01 22:37:33 -05:00
89c3b59610 fix: typo 2024-12-01 16:03:32 -05:00
c6544cfe05 fix: assume path is target 2024-12-01 16:01:27 -05:00
f75909fd8f fix: expected output location in release 2024-12-01 15:56:37 -05:00
52d1d589ac use cross to cross-build 2024-12-01 15:45:25 -05:00
eb9c0be437 force cargo build with target 2024-12-01 15:39:16 -05:00
d307a11819 release: v0.3.10 2024-12-01 15:33:13 -05:00
c4c52babae
Merge pull request #65 from aramperes/smoltcp-0.12 2024-12-01 15:31:03 -05:00
6b2f6148c6 chore: add linux-aarch64 build 2024-12-01 15:29:59 -05:00
991eef0311 chore: update MSRV to 1.80.0 2024-12-01 15:27:12 -05:00
0e93a6435a chore: update to smoltcp 0.12 2024-12-01 15:22:37 -05:00
ca3590a4c0 chore: bump minor dependencies 2024-12-01 15:13:46 -05:00
784ab97c8b release: v0.3.9; add macos-aarch64 build 2024-12-01 12:41:23 -05:00
f3661c0a2c fix docker build 2024-12-01 12:33:56 -05:00
4fa8304799 bump MSRV to 1.78.0 2024-12-01 12:30:13 -05:00
1f3d9f035f release: v0.3.8 2024-12-01 12:28:13 -05:00
06049161ab bump MSRV to 1.74.0 2024-12-01 12:27:41 -05:00
e26cca089f
Merge pull request #64 from aramperes/fix/63 2024-12-01 12:08:24 -05:00
88ce124544 formatting 2024-12-01 12:03:51 -05:00
9ccd2e19f6 increase default smoltcp interface limit and add to README 2024-12-01 12:03:41 -05:00
c86784ed70 log a better error regarding smoltcp max interface limit 2024-12-01 11:33:53 -05:00
e25c88410e
Merge pull request #61 from PeterDaveHelloKitchen/OptimizeDockerfile 2024-04-07 20:38:08 -04:00
Peter Dave Hello
2b6d21572e
Optimize apt-get commands to reduce image size in Dockerfile
This commit improves the Dockerfile by consolidating apt-get update and apt-get install commands into a single RUN statement and adding cleanup steps for the apt cache.
2024-04-07 01:32:37 +08:00
56c950d159 Use bail when possible 2023-12-24 15:23:12 -05:00
ce40f85efa Cleanup usage of anyhow with_context 2023-12-24 15:06:22 -05:00
3ccd000ea8 Minor dependency updates 2023-12-24 14:58:51 -05:00
5fd28164b5
Merge pull request #60 from aramperes/patch/boringtun-0.6 2023-12-24 14:45:45 -05:00
1d703facc0 Implement locking of Tunn in WireGuardTunnel 2023-12-24 14:42:34 -05:00
e23cfc3e7e Update to new x25519 primitives 2023-12-24 11:52:07 -05:00
0931ed496a update boringtun to 0.6.0 2023-12-24 11:51:28 -05:00
91e6c79832
Merge pull request #59 from aramperes/patch/smoltcp-0.11 2023-12-24 11:31:00 -05:00
72ab679142 update to smoltcp 0.11 2023-12-24 11:28:15 -05:00
10b88ccc60 cleanup: SockSet can be owned by static
ref: https://github.com/smoltcp-rs/smoltcp/pull/813
2023-12-24 11:23:58 -05:00
83beb48b07 release: v0.3.7 2023-12-23 21:59:55 -05:00
21fe78f540 Add docs/example for SOCKS proxy 2023-12-23 21:44:25 -05:00
c3b752678e
Merge pull request #58 from aramperes/patch/smoltcp-0.10 2023-12-23 21:23:41 -05:00
32f189e53a Revert virtual port for server socket 2023-12-23 21:01:35 -05:00
488a0e0807 remove AnyIP; fix IPv6 virtual addresses 2023-12-23 21:01:00 -05:00
ssrlive
38fc217a29 smoltcp version 0.10 applied 2023-12-23 20:22:01 -05:00
ae15b4203c release: v0.3.6 2023-12-21 15:34:08 -05:00
992e55bf91
Merge pull request #55 from lu-zero/update-clap 2023-12-21 15:31:27 -05:00
a100f90a92 chore: update MSRV to 1.70.0 2023-12-21 15:29:43 -05:00
Luca Barbato
1613d2bb5c Bump clap version 2023-12-21 15:24:44 -05:00
Luca Barbato
29fb98f02b Update deps 2023-12-21 15:24:44 -05:00
767b83d241
Merge pull request #57 from aramperes/chore/update-docker-rust-version 2023-12-21 15:21:58 -05:00
731218d943 Fix new clippy warnings 2023-12-21 15:16:57 -05:00
7200cc07e7 chore: update MSRV to 1.65.0 2023-12-20 17:31:39 -05:00
Marco Nalon
1997ae7ea8 chore: update Dockerfile rust version 1.63.0 -> 1.65.0 2023-12-20 16:59:27 +01:00
9f53198f17 Remove $ from README examples 2023-10-02 19:55:24 -04:00
998d1cfc8d Add maintenance disclaimer 2023-10-02 19:51:18 -04:00
0a06df59f9 Update copyright year 2023-10-02 18:17:52 -04:00
61da97f4aa Update release action to latest Ubuntu 2023-10-02 18:05:30 -04:00
c5e803192f Disable macos package install 2023-10-02 17:57:15 -04:00
2896a4fcdb Update dependencies and bump MSRV to 1.63 2023-10-02 17:55:05 -04:00
07e895c770 release: v0.3.5 2023-10-02 17:37:18 -04:00
c0d0a5cb02
Merge pull request #51 from george-hopkins/psk 2023-10-02 17:29:15 -04:00
1333ea8a7c Rename option to --preshared-key and add to README 2023-10-02 17:21:32 -04:00
6f143280d1 Pin older version of base64 for now 2023-10-02 17:07:37 -04:00
George Hopkins
653c314409 Support pre-shared key 2023-10-02 16:24:37 +02:00
43a20ef6b3 Update dependencies 2023-01-12 02:53:56 -05:00
4f935c5a2d reorder dep 2023-01-12 02:38:36 -05:00
5dc04d9614
Merge pull request #49 from aramperes/bytes 2023-01-12 01:46:41 -05:00
fa634a08dc Fix a clippy warning 2023-01-12 01:43:32 -05:00
76b6a6e346 Use bytes 2023-01-12 01:40:04 -05:00
e62b7d30fe release: v0.3.4 2022-09-25 17:29:04 -04:00
0553fce5c6 chore: bump msrv to 1.57 2022-09-25 17:24:03 -04:00
6c64531940 chore: update dependencies 2022-09-25 17:19:16 -04:00
77981371fc
Merge pull request #45 from TitanNano/issues/44 2022-09-25 16:55:24 -04:00
Jovan Gerodetti
fbc76e3fb0 Handle WireGuardError::ConnectionExpired #44 2022-09-25 22:34:03 +02:00
85195d8aba
Merge pull request #41 from samhug/stackoverflow
Fix stack overflow on windows
2022-08-20 09:17:14 -04:00
eb9b8ff15d
Merge pull request #42 from kianmeng/fix-typos-and-markdowns 2022-08-12 09:53:09 -04:00
Kian-Meng Ang
074e1b430c Fix typos and markdowns
Found via these commands:

    codespell -L crate
    markdownlint -f README.md --disable MD013 MD033 MD041
2022-08-12 18:45:14 +08:00
Sam Hug
cea343c2c9 heap alloc WireGuardTunnel::consume_task() future 2022-08-11 15:32:24 -07:00
aef90a5c0c Add --endpoint-bind-addr to README 2022-07-18 20:41:48 -04:00
b78cab58ee release: v0.3.3 2022-06-25 15:05:10 -04:00
8cee210ccb Expose boringtun x25519 primitives 2022-06-25 14:38:08 -04:00
96be421495 Increase MSRV to 1.56.1 2022-06-25 14:03:18 -04:00
c09a541788 Update dependencies 2022-06-25 13:55:26 -04:00
371a55bb71 release: 0.3.2 2022-06-25 11:15:16 -04:00
00b45f8cb4 Update to Edition 2021 and fix docker build 2022-06-25 11:12:23 -04:00
75bad318f4 release: v0.3.1 2022-06-25 10:54:47 -04:00
8c1bdb1700 Minimize tokio features 2022-06-25 10:49:37 -04:00
1a560434d4 Fix cargo check action 2022-06-25 10:39:44 -04:00
48eaf0f840 Allow onetun to be used as a library 2022-06-25 10:33:37 -04:00
f85692950f Split dependencies that are only used for the binary version of onetun 2022-06-24 01:45:31 -04:00
1c1399d5ff
Merge pull request #34 from SideStore/host-address-binding 2022-06-24 01:22:14 -04:00
Jackson Coxson
4162f62ae6 Change the error message from host to bind 2022-06-23 23:14:57 -06:00
Jackson Coxson
9bd7ec2cca Simplify IP version detection 2022-06-23 23:11:45 -06:00
Jackson Coxson
1680b17c47 Correct binding terminology IP version detection 2022-06-23 23:10:48 -06:00
Jackson Coxson
96e18edd19 Invert logic for IP version mismatch 2022-06-23 23:10:11 -06:00
a81f5fe5e6 Simplify README intro 2022-06-24 01:01:53 -04:00
Jackson Coxson
c647bc9a96 Rename host_addr to endpoint_bind_addr 2022-06-23 23:01:32 -06:00
14df68ecc9 Simplify README intro 2022-06-24 01:01:26 -04:00
Jackson Coxson
3ab108ad04 Move host address resolution logic to config 2022-06-23 22:59:19 -06:00
c8a62debb1 Simplify README intro 2022-06-24 00:57:28 -04:00
Jackson Coxson
b108b5f404 Clarify help instructions for host binding 2022-06-23 22:47:33 -06:00
Jackson Coxson
5e94a0f31e Add host address binding option 2022-06-22 23:06:16 -06:00
73671a4d07 Add argument and env variable for remote port forwarding.
Part of #6
2022-06-21 18:38:55 -04:00
52aba0115d Fix new clippy lint 2022-03-27 17:14:18 -04:00
472a4df69f README adjustments 2022-03-27 14:15:25 -04:00
7ebf8e0737 release: v0.3.0 2022-02-15 02:01:31 -05:00
bcd840f838
Merge pull request #33 from aramperes/boringtun-0.4.0 2022-02-15 01:45:21 -05:00
a44b8b48eb Update README 2022-02-15 01:41:13 -05:00
93116fae26 Update boringtun to 0.4.0 2022-02-15 01:31:41 -05:00
648154b5ee Add tcpdump example 2022-01-10 01:32:55 -05:00
45962f4356 Update Architecture section in README 2022-01-10 01:25:56 -05:00
47c6c588d2 udp: remove extra socket iteration in virtual iface 2022-01-10 00:46:52 -05:00
782f5e74bf Apply TcpStream fix to UdpSocket as well 2022-01-10 00:35:14 -05:00
2b15e581f2 release: v0.2.9 2022-01-09 22:58:16 -05:00
e99fe6b8fb
Merge pull request #32 from aramperes/22-fix 2022-01-09 22:57:32 -05:00
11f86c49d6 Ensure all bytes are written to TcpStream
Fixes #22
2022-01-09 22:52:48 -05:00
def5f22d3c release: v0.2.8 2022-01-08 17:50:29 -05:00
e06b6526b7 Process more than one UDP socket per poll 2022-01-08 17:49:07 -05:00
3b296d66c5 release: v0.2.7 2022-01-08 17:40:29 -05:00
1aadea86d5
Merge pull request #30 from aramperes/pcap 2022-01-08 17:39:53 -05:00
ff0f5b967e Add optional IP packet capture for WireGuard tunnel 2022-01-08 17:30:10 -05:00
953bc18279 Remove some clippy suppressions for udp files 2022-01-08 15:20:21 -05:00
b3776c8b05 release: v0.2.6 2022-01-08 15:12:23 -05:00
d9bccb79e5 Process all TCP virtual client sockets in one poll 2022-01-08 15:11:59 -05:00
daa2362915 release: v0.2.5 2022-01-08 14:52:06 -05:00
025c001abb Remove event tracing when reading from bus 2022-01-08 14:48:04 -05:00
2b18bd4ec3 Remove unused dependency. Improve trace logging perf. 2022-01-08 14:41:12 -05:00
2e204d80fd release: v0.2.4 2022-01-08 03:44:10 -05:00
5b388f2ea3
Merge pull request #28 from aramperes/bus-based 2022-01-08 03:42:29 -05:00
abd9df6be4 Implement event-based UDP interface 2022-01-08 03:40:20 -05:00
51788c9557 Improve reliability using event-based synchronization 2022-01-08 02:18:51 -05:00
25 changed files with 2558 additions and 1523 deletions

4
.cargo/config.toml Normal file
View file

@ -0,0 +1,4 @@
[env]
# Each interface needs 1 IP allocated to the WireGuard peer IP.
# "8" = 7 tunnels per protocol.
SMOLTCP_IFACE_MAX_ADDR_COUNT = "8"

View file

@ -1,6 +1,6 @@
#!/bin/sh #!/bin/sh
brew install asciidoctor # brew install asciidoctor
brew install openssl@1.1 # brew install openssl@1.1
cp /usr/local/opt/openssl@1.1/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/ # cp /usr/local/opt/openssl@1.1/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/

10
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,10 @@
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
rebase-strategy: "disabled"

View file

@ -10,7 +10,7 @@ jobs:
matrix: matrix:
rust: rust:
- stable - stable
- 1.55.0 - 1.80.0
steps: steps:
- name: Checkout sources - name: Checkout sources
uses: actions/checkout@v2 uses: actions/checkout@v2
@ -26,6 +26,12 @@ jobs:
with: with:
command: check command: check
- name: Run cargo check without default features
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features
test: test:
name: Test Suite name: Test Suite
runs-on: ubuntu-latest runs-on: ubuntu-latest
@ -33,7 +39,7 @@ jobs:
matrix: matrix:
rust: rust:
- stable - stable
- 1.55.0 - 1.80.0
steps: steps:
- name: Checkout sources - name: Checkout sources
uses: actions/checkout@v2 uses: actions/checkout@v2

View file

@ -61,7 +61,7 @@ jobs:
run: echo "${{ env.VERSION }}" > artifacts/release-version run: echo "${{ env.VERSION }}" > artifacts/release-version
- name: Upload artifacts - name: Upload artifacts
uses: actions/upload-artifact@v1 uses: actions/upload-artifact@v4
with: with:
name: artifacts name: artifacts
path: artifacts path: artifacts
@ -75,20 +75,28 @@ jobs:
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
strategy: strategy:
matrix: matrix:
build: [ linux-amd64, macos-intel, windows ] build: [ linux-amd64, linux-aarch64, macos-aarch64, windows ]
include: include:
- build: linux-amd64 - build: linux-amd64
os: ubuntu-18.04 os: ubuntu-latest
rust: stable rust: stable
target: x86_64-unknown-linux-musl target: x86_64-unknown-linux-musl
- build: macos-intel cross: true
- build: linux-aarch64
os: ubuntu-latest
rust: stable
target: aarch64-unknown-linux-musl
cross: true
- build: macos-aarch64
os: macos-latest os: macos-latest
rust: stable rust: stable
target: x86_64-apple-darwin target: aarch64-apple-darwin
cross: false
- build: windows - build: windows
os: windows-2019 os: windows-2019
rust: stable rust: stable
target: x86_64-pc-windows-msvc target: x86_64-pc-windows-msvc
cross: false
steps: steps:
- name: Checkout repository - name: Checkout repository
@ -97,7 +105,7 @@ jobs:
fetch-depth: 1 fetch-depth: 1
- name: Install packages (Ubuntu) - name: Install packages (Ubuntu)
if: matrix.os == 'ubuntu-18.04' if: matrix.os == 'ubuntu-latest'
run: | run: |
.github/ci/ubuntu-install-packages .github/ci/ubuntu-install-packages
- name: Install packages (macOS) - name: Install packages (macOS)
@ -113,7 +121,7 @@ jobs:
target: ${{ matrix.target }} target: ${{ matrix.target }}
- name: Get release download URL - name: Get release download URL
uses: actions/download-artifact@v1 uses: actions/download-artifact@v4
with: with:
name: artifacts name: artifacts
path: artifacts path: artifacts
@ -126,17 +134,24 @@ jobs:
echo "release upload url: $release_upload_url" echo "release upload url: $release_upload_url"
- name: Build onetun binary - name: Build onetun binary
run: cargo build --release shell: bash
run: |
if [ "${{ matrix.cross }}" = "true" ]; then
cargo install cross
cross build --release --target ${{ matrix.target }}
else
cargo build --release --target ${{ matrix.target }}
fi
- name: Prepare onetun binary - name: Prepare onetun binary
shell: bash shell: bash
run: | run: |
mkdir -p ci/assets mkdir -p ci/assets
if [ "${{ matrix.build }}" = "windows" ]; then if [ "${{ matrix.build }}" = "windows" ]; then
cp "target/release/onetun.exe" "ci/assets/onetun.exe" cp "target/${{ matrix.target }}/release/onetun.exe" "ci/assets/onetun.exe"
echo "ASSET=onetun.exe" >> $GITHUB_ENV echo "ASSET=onetun.exe" >> $GITHUB_ENV
else else
cp "target/release/onetun" "ci/assets/onetun-${{ matrix.build }}" cp "target/${{ matrix.target }}/release/onetun" "ci/assets/onetun-${{ matrix.build }}"
echo "ASSET=onetun-${{ matrix.build }}" >> $GITHUB_ENV echo "ASSET=onetun-${{ matrix.build }}" >> $GITHUB_ENV
fi fi

2
.gitignore vendored
View file

@ -2,3 +2,5 @@
/.idea /.idea
.envrc .envrc
*.log *.log
*.pcap
.DS_Store

1176
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,21 +1,48 @@
[package] [package]
name = "onetun" name = "onetun"
version = "0.2.3" version = "0.3.10"
edition = "2018" edition = "2021"
license = "MIT"
description = "A cross-platform, user-space WireGuard port-forwarder that requires no system network configurations."
authors = ["Aram Peres <aram.peres@gmail.com>"]
repository = "https://github.com/aramperes/onetun"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
boringtun = { git = "https://github.com/cloudflare/boringtun", rev = "fbcf2689e7776a5af805c5a38feb5c8988829980", default-features = false } # Required dependencies (bin and lib)
clap = { version = "2.33", default-features = false, features = ["suggestions"] } boringtun = { version = "0.6.0", default-features = false }
log = "0.4" log = "0.4"
pretty_env_logger = "0.4"
anyhow = "1" anyhow = "1"
smoltcp = { version = "0.8.0", default-features = false, features = ["std", "log", "medium-ip", "proto-ipv4", "proto-ipv6", "socket-udp", "socket-tcp"] } tokio = { version = "1", features = [ "rt", "sync", "io-util", "net", "time", "fs", "macros" ] }
tokio = { version = "1", features = ["full"] } futures = "0.3"
futures = "0.3.17" rand = "0.8"
rand = "0.8.4"
nom = "7" nom = "7"
async-trait = "0.1.51" async-trait = "0.1"
dashmap = "4.0.2" priority-queue = "2.1"
priority-queue = "1.2.0" smoltcp = { version = "0.12", default-features = false, features = [
"std",
"log",
"medium-ip",
"proto-ipv4",
"proto-ipv6",
"socket-udp",
"socket-tcp",
] }
bytes = "1"
base64 = "0.13"
# forward boringtuns tracing events to log
tracing = { version = "0.1", default-features = false, features = ["log"] }
# bin-only dependencies
clap = { version = "4.4.11", default-features = false, features = ["suggestions", "std", "env", "help", "wrap_help"], optional = true }
pretty_env_logger = { version = "0.5", optional = true }
async-recursion = "1.0"
[features]
pcap = []
default = [ "bin" ]
bin = [ "clap", "pretty_env_logger", "pcap", "tokio/rt-multi-thread" ]
[lib]

View file

@ -1,10 +1,11 @@
FROM rust:1.55 as cargo-build FROM rust:1.82.0 as cargo-build
WORKDIR /usr/src/onetun WORKDIR /usr/src/onetun
COPY Cargo.toml Cargo.toml COPY Cargo.toml Cargo.toml
# Placeholder to download dependencies and cache them using layering # Placeholder to download dependencies and cache them using layering
RUN mkdir src/ RUN mkdir src/
RUN touch src/lib.rs
RUN echo "fn main() {println!(\"if you see this, the build broke\")}" > src/main.rs RUN echo "fn main() {println!(\"if you see this, the build broke\")}" > src/main.rs
RUN cargo build --release RUN cargo build --release
RUN rm -f target/x86_64-unknown-linux-musl/release/deps/myapp* RUN rm -f target/x86_64-unknown-linux-musl/release/deps/myapp*
@ -14,8 +15,9 @@ COPY . .
RUN cargo build --release RUN cargo build --release
FROM debian:11-slim FROM debian:11-slim
RUN apt-get update RUN apt-get update \
RUN apt-get install dumb-init -y && apt-get install dumb-init -y \
&& rm -rf /var/lib/apt/lists/*
COPY --from=cargo-build /usr/src/onetun/target/release/onetun /usr/local/bin/onetun COPY --from=cargo-build /usr/src/onetun/target/release/onetun /usr/local/bin/onetun

View file

@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2021 Aram Peres Copyright (c) 2025 Aram Peres
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

204
README.md
View file

@ -2,20 +2,48 @@
# onetun # onetun
A cross-platform, user-space WireGuard port-forwarder that requires no system network configurations. A cross-platform, user-space WireGuard port-forwarder that requires **no root-access or system network configurations**.
[![crates.io](https://img.shields.io/crates/v/onetun.svg)](https://crates.io/crates/onetun)
[![MIT licensed](https://img.shields.io/crates/l/onetun.svg)](./LICENSE)
[![Build status](https://github.com/aramperes/onetun/actions/workflows/build.yml/badge.svg)](https://github.com/aramperes/onetun/actions) [![Build status](https://github.com/aramperes/onetun/actions/workflows/build.yml/badge.svg)](https://github.com/aramperes/onetun/actions)
[![Latest Release](https://img.shields.io/github/v/tag/aramperes/onetun?label=release)](https://github.com/aramperes/onetun/releases/latest) [![Latest Release](https://img.shields.io/github/v/tag/aramperes/onetun?label=release)](https://github.com/aramperes/onetun/releases/latest)
## Use-case ## Use-case
- You have an existing WireGuard endpoint (router), accessible using its UDP endpoint (typically port 51820); and Access TCP or UDP services running on your WireGuard network, from devices that don't have WireGuard installed.
- You have a peer on the WireGuard network, running a TCP or UDP service on a port accessible to the WireGuard network; and
- You want to access this TCP or UDP service from a second computer, on which you can't install WireGuard because you
can't (no root access) or don't want to (polluting OS configs).
For example, this can be useful to forward a port from a Kubernetes cluster to a server behind WireGuard, For example,
without needing to install WireGuard in a Pod.
- Personal or shared computers where you can't install WireGuard (root)
- IoT and mobile devices
- Root-less containers
## Download
onetun is available to install from [crates.io](https://crates.io/crates/onetun) with Rust ≥1.80.0:
```shell
cargo install onetun
```
You can also download the binary for Windows, macOS (Apple Silicon), and Linux (amd64, arm64) from
the [Releases](https://github.com/aramperes/onetun/releases) page.
You can also run onetun using [Docker](https://hub.docker.com/r/aramperes/onetun):
```shell
docker run --rm --name onetun --user 1000 -p 8080:8080 aramperes/onetun \
0.0.0.0:8080:192.168.4.2:8080 [...options...]
```
You can also build onetun locally, using Rust ≥1.80.0:
```shell
git clone https://github.com/aramperes/onetun && cd onetun
cargo build --release
./target/release/onetun
```
## Usage ## Usage
@ -26,14 +54,14 @@ access, or install any WireGuard tool on your local system for it to work.
The only prerequisite is to register a peer IP and public key on the remote WireGuard endpoint; those are necessary for The only prerequisite is to register a peer IP and public key on the remote WireGuard endpoint; those are necessary for
the WireGuard endpoint to trust the onetun peer and for packets to be routed. the WireGuard endpoint to trust the onetun peer and for packets to be routed.
``` ```shell
./onetun [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...] [...] \ onetun [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...] [...] \
--endpoint-addr <public WireGuard endpoint address> \ --endpoint-addr <public WireGuard endpoint address> \
--endpoint-public-key <the public key of the peer on the endpoint> \ --endpoint-public-key <the public key of the peer on the endpoint> \
--private-key <private key assigned to onetun> \ --private-key <private key assigned to onetun> \
--source-peer-ip <IP assigned to onetun> \ --source-peer-ip <IP assigned to onetun> \
--keep-alive <optional persistent keep-alive in seconds> \ --keep-alive <optional persistent keep-alive in seconds> \
--log <optional log level, defaults to "info" --log <optional log level, defaults to "info">
``` ```
> Note: you can use environment variables for all of these flags. Use `onetun --help` for details. > Note: you can use environment variables for all of these flags. Use `onetun --help` for details.
@ -42,7 +70,7 @@ the WireGuard endpoint to trust the onetun peer and for packets to be routed.
Suppose your WireGuard endpoint has the following configuration, and is accessible from `140.30.3.182:51820`: Suppose your WireGuard endpoint has the following configuration, and is accessible from `140.30.3.182:51820`:
``` ```shell
# /etc/wireguard/wg0.conf # /etc/wireguard/wg0.conf
[Interface] [Interface]
@ -65,7 +93,7 @@ We want to access a web server on the friendly peer (`192.168.4.2`) on port `808
local port, say `127.0.0.1:8080`, that will tunnel through WireGuard to reach the peer web server: local port, say `127.0.0.1:8080`, that will tunnel through WireGuard to reach the peer web server:
```shell ```shell
./onetun 127.0.0.1:8080:192.168.4.2:8080 \ onetun 127.0.0.1:8080:192.168.4.2:8080 \
--endpoint-addr 140.30.3.182:51820 \ --endpoint-addr 140.30.3.182:51820 \
--endpoint-public-key 'PUB_****************************************' \ --endpoint-public-key 'PUB_****************************************' \
--private-key 'PRIV_BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' \ --private-key 'PRIV_BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' \
@ -75,14 +103,14 @@ local port, say `127.0.0.1:8080`, that will tunnel through WireGuard to reach th
You'll then see this log: You'll then see this log:
``` ```shell
INFO onetun > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
``` ```
Which means you can now access the port locally! Which means you can now access the port locally!
``` ```shell
$ curl 127.0.0.1:8080 curl 127.0.0.1:8080
Hello world! Hello world!
``` ```
@ -90,24 +118,32 @@ Hello world!
**onetun** supports running multiple tunnels in parallel. For example: **onetun** supports running multiple tunnels in parallel. For example:
``` ```shell
$ ./onetun 127.0.0.1:8080:192.168.4.2:8080 127.0.0.1:8081:192.168.4.4:8081 onetun 127.0.0.1:8080:192.168.4.2:8080 127.0.0.1:8081:192.168.4.4:8081
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8081]->[192.168.4.4:8081] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8081]->[192.168.4.4:8081] (via [140.30.3.182:51820] as peer 192.168.4.3)
``` ```
... would open TCP ports 8080 and 8081 locally, which forward to their respective ports on the different peers. ... would open TCP ports 8080 and 8081 locally, which forward to their respective ports on the different peers.
#### Maximum number of tunnels
`smoltcp` imposes a compile-time limit on the number of IP addresses assigned to an interface. **onetun** increases
the default value to support most use-cases. In effect, the default limit on the number of **onetun** peers
is **7 per protocol** (TCP and UDP).
Should you need more unique IP addresses to forward ports to, you can increase the limit in `.cargo/config.toml` and recompile **onetun**.
### UDP Support ### UDP Support
**onetun** supports UDP forwarding. You can add `:UDP` at the end of the port-forward configuration, or `UDP,TCP` to support **onetun** supports UDP forwarding. You can add `:UDP` at the end of the port-forward configuration, or `UDP,TCP` to support
both protocols on the same port (note that this opens 2 separate tunnels, just on the same port) both protocols on the same port (note that this opens 2 separate tunnels, just on the same port)
``` ```shell
$ ./onetun 127.0.0.1:8080:192.168.4.2:8080:UDP onetun 127.0.0.1:8080:192.168.4.2:8080:UDP
INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
$ ./onetun 127.0.0.1:8080:192.168.4.2:8080:UDP,TCP onetun 127.0.0.1:8080:192.168.4.2:8080:UDP,TCP
INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
``` ```
@ -119,61 +155,77 @@ it in any production capacity.
**onetun** supports both IPv4 and IPv6. In fact, you can use onetun to forward some IP version to another, e.g. 6-to-4: **onetun** supports both IPv4 and IPv6. In fact, you can use onetun to forward some IP version to another, e.g. 6-to-4:
``` ```shell
$ ./onetun [::1]:8080:192.168.4.2:8080 onetun [::1]:8080:192.168.4.2:8080
INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
``` ```
Note that each tunnel can only support one "source" IP version and one "destination" IP version. If you want to support Note that each tunnel can only support one "source" IP version and one "destination" IP version. If you want to support
both IPv4 and IPv6 on the same port, you should create a second port-forward: both IPv4 and IPv6 on the same port, you should create a second port-forward:
``` ```shell
$ ./onetun [::1]:8080:192.168.4.2:8080 127.0.0.1:8080:192.168.4.2:8080 onetun [::1]:8080:192.168.4.2:8080 127.0.0.1:8080:192.168.4.2:8080
INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3) INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
``` ```
## Download ### Packet Capture
Normally I would publish `onetun` to crates.io. However, it depends on some features For debugging purposes, you can enable the capture of IP packets sent between onetun and the WireGuard peer.
in [smoltcp](https://github.com/smoltcp-rs/smoltcp) and The output is a libpcap capture file that can be viewed with Wireshark.
[boringtun](https://github.com/cloudflare/boringtun) that haven't been published yet, so I'm forced to use their Git
repos as dependencies for now.
In the meantime, you can download the binary for Windows, macOS (Intel), and Linux (amd64) from
the [Releases](https://github.com/aramperes/onetun/releases) page.
You can also run onetun using [Docker](https://hub.docker.com/r/aramperes/onetun):
```shell ```shell
docker run --rm --name onetun --user 1000 -p 8080:8080 aramperes/onetun \ onetun --pcap wg.pcap 127.0.0.1:8080:192.168.4.2:8080
0.0.0.0:8080:192.168.4.2:8080 [...options...] INFO onetun::pcap > Capturing WireGuard IP packets to wg.pcap
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
``` ```
You can also build onetun locally, using Rust: To capture packets sent to and from the onetun local port, you must use an external tool like `tcpdump` with root access:
```shell ```shell
$ git clone https://github.com/aramperes/onetun && cd onetun sudo tcpdump -i lo -w local.pcap 'dst 127.0.0.1 && port 8080'
$ cargo build --release ```
$ ./target/release/onetun
### WireGuard Options
By default, onetun will create the UDP socket to communicate with the WireGuard endpoint on all interfaces and on a dynamic port,
i.e. `0.0.0.0:0` for IPv4 endpoints, or `[::]:0` for IPv6.
You can bind to a static address instead using `--endpoint-bind-addr`:
```shell
onetun --endpoint-bind-addr 0.0.0.0:51820 --endpoint-addr 140.30.3.182:51820 [...]
```
The security of the WireGuard connection can be further enhanced with a **pre-shared key** (PSK). You can generate such a key with the `wg genpsk` command, and provide it using `--preshared-key`.
The peer must also have this key configured using the `PresharedKey` option.
```shell
onetun --preshared-key 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' [...]
``` ```
## Architecture ## Architecture
**In short:** onetun uses [smoltcp's](https://github.com/smoltcp-rs/smoltcp) TCP/IP and UDP stack to generate IP packets
using its state machine ("virtual interface"). The generated IP packets are
encrypted by [boringtun](https://github.com/cloudflare/boringtun) and sent to the WireGuard endpoint. Encrypted IP packets received
from the WireGuard endpoint are decrypted using boringtun and sent through the smoltcp virtual interface state machine.
onetun creates "virtual sockets" in the virtual interface to forward data sent from inbound connections,
as well as to receive data from the virtual interface to forward back to the local client.
---
onetun uses [tokio](https://github.com/tokio-rs/tokio), the async runtime, to listen for new TCP connections on the onetun uses [tokio](https://github.com/tokio-rs/tokio), the async runtime, to listen for new TCP connections on the
given port. given port.
When a client connects to the local TCP port, it uses [smoltcp](https://github.com/smoltcp-rs/smoltcp) to When a client connects to the onetun's TCP port, a "virtual client" is
create a "virtual interface", with a "virtual client" and a "virtual server" for the connection. These "virtual" created in a [smoltcp](https://github.com/smoltcp-rs/smoltcp) "virtual" TCP/IP interface, which runs fully inside the onetun
components are the crux of how onetun works. They essentially replace the host's TCP/IP stack with smoltcp's, which process. An ephemeral "virtual port" is assigned to the "virtual client", which maps back to the local client.
fully runs inside onetun. An ephemeral "virtual port" is also assigned to the connection, in order to route packets
back to the right connection.
When the real client opens the connection, the virtual client socket opens a TCP connection to the virtual server. When the real client opens the connection, the virtual client socket opens a TCP connection to the virtual server
The virtual interface (implemented by smoltcp) in turn crafts the `SYN` segment and wraps it in an IP packet. (a dummy socket bound to the remote host/port). The virtual interface in turn crafts the `SYN` segment and wraps it in an IP packet.
Because of how the virtual client and server are configured, the IP packet is crafted with a source address Because of how the virtual client and server are configured, the IP packet is crafted with a source address
being the configured `source-peer-ip` (`192.168.4.3` in the example above), being the configured `source-peer-ip` (`192.168.4.3` in the example above),
and the destination address is the remote peer's (`192.168.4.2`). and the destination address matches the port-forward's configured destination (`192.168.4.2`).
By doing this, we let smoltcp handle the crafting of the IP packets, and the handling of the client's TCP states. By doing this, we let smoltcp handle the crafting of the IP packets, and the handling of the client's TCP states.
Instead of actually sending those packets to the virtual server, Instead of actually sending those packets to the virtual server,
@ -184,8 +236,8 @@ Once the WireGuard endpoint receives an encrypted IP packet, it decrypts it usin
It reads the destination address, re-encrypts the IP packet using the matching peer's public key, and sends it off to It reads the destination address, re-encrypts the IP packet using the matching peer's public key, and sends it off to
the peer's UDP endpoint. the peer's UDP endpoint.
The remote peer receives the encrypted IP and decrypts it. It can then read the inner payload (the TCP segment), The peer receives the encrypted IP and decrypts it. It can then read the inner payload (the TCP segment),
forward it to the server's port, which handles the TCP segment. The server responds with `SYN-ACK`, which goes back through forward it to the server's port, which handles the TCP segment. The TCP server responds with `SYN-ACK`, which goes back through
the peer's local WireGuard interface, gets encrypted, forwarded to the WireGuard endpoint, and then finally back to onetun's UDP port. the peer's local WireGuard interface, gets encrypted, forwarded to the WireGuard endpoint, and then finally back to onetun's UDP port.
When onetun receives an encrypted packet from the WireGuard endpoint, it decrypts it using boringtun. When onetun receives an encrypted packet from the WireGuard endpoint, it decrypts it using boringtun.
@ -215,6 +267,56 @@ if the least recently used port hasn't been used for a certain amount of time. I
All in all, I would not recommend using UDP forwarding for public services, since it's most likely prone to simple DoS or DDoS. All in all, I would not recommend using UDP forwarding for public services, since it's most likely prone to simple DoS or DDoS.
## HTTP/SOCKS Proxy
**onetun** is a Transport-layer proxy (also known as port forwarding); it is not in scope to provide
a HTTP/SOCKS proxy server. However, you can easily chain **onetun** with a proxy server on a remote
that is locked down to your WireGuard network.
For example, you could run [dante-server](https://www.inet.no/dante/) on a peer (ex. `192.168.4.2`) with the following configuration:
```
# /etc/danted.conf
logoutput: syslog
user.privileged: root
user.unprivileged: nobody
internal: 192.168.4.2 port=1080
external: eth0
socksmethod: none
clientmethod: none
# Locks down proxy use to WireGuard peers (192.168.4.x)
client pass {
from: 192.168.4.0/24 to: 0.0.0.0/0
}
socks pass {
from: 192.168.4.0/24 to: 0.0.0.0/0
}
```
Then use **onetun** to expose the SOCKS5 proxy locally:
```shell
onetun 127.0.0.1:1080:192.168.4.2:1080
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:1080]->[192.168.4.2:1080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Test with `curl` (or configure your browser):
```shell
curl -x socks5://127.0.0.1:1080 https://ifconfig.me
```
## Contributing and Maintenance
I will gladly accept contributions to onetun, and set aside time to review all pull-requests.
Please consider opening a GitHub issue if you are unsure if your contribution is within the scope of the project.
**Disclaimer**: I do not have enough personal time to actively maintain onetun besides open-source contributions.
## License ## License
MIT. See `LICENSE` for details. MIT License. See `LICENSE` for details. Copyright &copy; 2025 Aram Peres.

View file

@ -5,35 +5,43 @@ use std::fs::read_to_string;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use std::sync::Arc; use std::sync::Arc;
use anyhow::Context; use anyhow::{bail, Context};
use boringtun::crypto::{X25519PublicKey, X25519SecretKey}; pub use boringtun::x25519::{PublicKey, StaticSecret};
use clap::{App, Arg};
#[derive(Clone, Debug)] const DEFAULT_PORT_FORWARD_SOURCE: &str = "127.0.0.1";
#[derive(Clone)]
pub struct Config { pub struct Config {
pub(crate) port_forwards: Vec<PortForwardConfig>, pub port_forwards: Vec<PortForwardConfig>,
pub(crate) private_key: Arc<X25519SecretKey>, #[allow(dead_code)]
pub(crate) endpoint_public_key: Arc<X25519PublicKey>, pub remote_port_forwards: Vec<PortForwardConfig>,
pub(crate) endpoint_addr: SocketAddr, pub private_key: Arc<StaticSecret>,
pub(crate) source_peer_ip: IpAddr, pub endpoint_public_key: Arc<PublicKey>,
pub(crate) keepalive_seconds: Option<u16>, pub preshared_key: Option<[u8; 32]>,
pub(crate) max_transmission_unit: usize, pub endpoint_addr: SocketAddr,
pub(crate) log: String, pub endpoint_bind_addr: SocketAddr,
pub(crate) warnings: Vec<String>, pub source_peer_ip: IpAddr,
pub keepalive_seconds: Option<u16>,
pub max_transmission_unit: usize,
pub log: String,
pub warnings: Vec<String>,
pub pcap_file: Option<String>,
} }
impl Config { impl Config {
#[cfg(feature = "bin")]
pub fn from_args() -> anyhow::Result<Self> { pub fn from_args() -> anyhow::Result<Self> {
use clap::{Arg, Command};
let mut warnings = vec![]; let mut warnings = vec![];
let matches = App::new("onetun") let matches = Command::new("onetun")
.author("Aram Peres <aram.peres@gmail.com>") .author("Aram Peres <aram.peres@gmail.com>")
.version(env!("CARGO_PKG_VERSION")) .version(env!("CARGO_PKG_VERSION"))
.args(&[ .args(&[
Arg::with_name("PORT_FORWARD") Arg::new("PORT_FORWARD")
.required(false) .required(false)
.multiple(true) .num_args(1..)
.takes_value(true)
.help("Port forward configurations. The format of each argument is [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \ .help("Port forward configurations. The format of each argument is [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \
where [src_host] is the local IP to listen on, <src_port> is the local port to listen on, <dst_host> is the remote peer IP to forward to, and <dst_port> is the remote port to forward to. \ where [src_host] is the local IP to listen on, <src_port> is the local port to listen on, <dst_host> is the remote peer IP to forward to, and <dst_port> is the remote port to forward to. \
Environment variables of the form 'ONETUN_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\ Environment variables of the form 'ONETUN_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\
@ -47,61 +55,94 @@ impl Config {
\tlocalhost:8080:192.168.4.1:8081:TCP\n\ \tlocalhost:8080:192.168.4.1:8081:TCP\n\
\tlocalhost:8080:peer.intranet:8081:TCP\ \tlocalhost:8080:peer.intranet:8081:TCP\
"), "),
Arg::with_name("private-key") Arg::new("private-key")
.required_unless("private-key-file") .conflicts_with("private-key-file")
.takes_value(true) .num_args(1)
.long("private-key") .long("private-key")
.env("ONETUN_PRIVATE_KEY") .env("ONETUN_PRIVATE_KEY")
.help("The private key of this peer. The corresponding public key should be registered in the WireGuard endpoint. \ .help("The private key of this peer. The corresponding public key should be registered in the WireGuard endpoint. \
You can also use '--private-key-file' to specify a file containing the key instead."), You can also use '--private-key-file' to specify a file containing the key instead."),
Arg::with_name("private-key-file") Arg::new("private-key-file")
.takes_value(true) .num_args(1)
.long("private-key-file") .long("private-key-file")
.env("ONETUN_PRIVATE_KEY_FILE") .env("ONETUN_PRIVATE_KEY_FILE")
.help("The path to a file containing the private key of this peer. The corresponding public key should be registered in the WireGuard endpoint."), .help("The path to a file containing the private key of this peer. The corresponding public key should be registered in the WireGuard endpoint."),
Arg::with_name("endpoint-public-key") Arg::new("endpoint-public-key")
.required(true) .required(true)
.takes_value(true) .num_args(1)
.long("endpoint-public-key") .long("endpoint-public-key")
.env("ONETUN_ENDPOINT_PUBLIC_KEY") .env("ONETUN_ENDPOINT_PUBLIC_KEY")
.help("The public key of the WireGuard endpoint (remote)."), .help("The public key of the WireGuard endpoint (remote)."),
Arg::with_name("endpoint-addr") Arg::new("preshared-key")
.required(false)
.num_args(1)
.long("preshared-key")
.env("ONETUN_PRESHARED_KEY")
.help("The pre-shared key (PSK) as configured with the peer."),
Arg::new("endpoint-addr")
.required(true) .required(true)
.takes_value(true) .num_args(1)
.long("endpoint-addr") .long("endpoint-addr")
.env("ONETUN_ENDPOINT_ADDR") .env("ONETUN_ENDPOINT_ADDR")
.help("The address (IP + port) of the WireGuard endpoint (remote). Example: 1.2.3.4:51820"), .help("The address (IP + port) of the WireGuard endpoint (remote). Example: 1.2.3.4:51820"),
Arg::with_name("source-peer-ip") Arg::new("endpoint-bind-addr")
.required(false)
.num_args(1)
.long("endpoint-bind-addr")
.env("ONETUN_ENDPOINT_BIND_ADDR")
.help("The address (IP + port) used to bind the local UDP socket for the WireGuard tunnel. Example: 1.2.3.4:30000. Defaults to 0.0.0.0:0 for IPv4 endpoints, or [::]:0 for IPv6 endpoints."),
Arg::new("source-peer-ip")
.required(true) .required(true)
.takes_value(true) .num_args(1)
.long("source-peer-ip") .long("source-peer-ip")
.env("ONETUN_SOURCE_PEER_IP") .env("ONETUN_SOURCE_PEER_IP")
.help("The source IP to identify this peer as (local). Example: 192.168.4.3"), .help("The source IP to identify this peer as (local). Example: 192.168.4.3"),
Arg::with_name("keep-alive") Arg::new("keep-alive")
.required(false) .required(false)
.takes_value(true) .num_args(1)
.long("keep-alive") .long("keep-alive")
.env("ONETUN_KEEP_ALIVE") .env("ONETUN_KEEP_ALIVE")
.help("Configures a persistent keep-alive for the WireGuard tunnel, in seconds."), .help("Configures a persistent keep-alive for the WireGuard tunnel, in seconds."),
Arg::with_name("max-transmission-unit") Arg::new("max-transmission-unit")
.required(false) .required(false)
.takes_value(true) .num_args(1)
.long("max-transmission-unit") .long("max-transmission-unit")
.env("ONETUN_MTU") .env("ONETUN_MTU")
.default_value("1420") .default_value("1420")
.help("Configures the max-transmission-unit (MTU) of the WireGuard tunnel."), .help("Configures the max-transmission-unit (MTU) of the WireGuard tunnel."),
Arg::with_name("log") Arg::new("log")
.required(false) .required(false)
.takes_value(true) .num_args(1)
.long("log") .long("log")
.env("ONETUN_LOG") .env("ONETUN_LOG")
.default_value("info") .default_value("info")
.help("Configures the log level and format.") .help("Configures the log level and format."),
Arg::new("pcap")
.required(false)
.num_args(1)
.long("pcap")
.env("ONETUN_PCAP")
.help("Decrypts and captures IP packets on the WireGuard tunnel to a given output file."),
Arg::new("remote")
.required(false)
.num_args(1..)
.long("remote")
.short('r')
.help("Remote port forward configurations. The format of each argument is <src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \
where <src_port> is the port the other peers will reach the server with, <dst_host> is the IP to forward to, and <dst_port> is the port to forward to. \
The <src_port> will be bound on onetun's peer IP, as specified by --source-peer-ip. If you pass a different value for <src_host> here, it will be rejected.\n\
Note: <dst_host>:<dst_port> must be reachable by onetun. If referring to another WireGuard peer, use --bridge instead (not supported yet).\n\
Environment variables of the form 'ONETUN_REMOTE_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\
Examples:\n\
\t--remote 8080:localhost:8081:TCP,UDP\n\
\t--remote 8080:[::1]:8081:TCP\n\
\t--remote 8080:google.com:80\
"),
]).get_matches(); ]).get_matches();
// Combine `PORT_FORWARD` arg and `ONETUN_PORT_FORWARD_#` envs // Combine `PORT_FORWARD` arg and `ONETUN_PORT_FORWARD_#` envs
let mut port_forward_strings = HashSet::new(); let mut port_forward_strings = HashSet::new();
if let Some(values) = matches.values_of("PORT_FORWARD") { if let Some(values) = matches.get_many::<String>("PORT_FORWARD") {
for value in values { for value in values {
port_forward_strings.insert(value.to_owned()); port_forward_strings.insert(value.to_owned());
} }
@ -113,26 +154,68 @@ impl Config {
break; break;
} }
} }
if port_forward_strings.is_empty() {
return Err(anyhow::anyhow!("No port forward configurations given."));
}
// Parse `PORT_FORWARD` strings into `PortForwardConfig` // Parse `PORT_FORWARD` strings into `PortForwardConfig`
let port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> = port_forward_strings let port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> = port_forward_strings
.into_iter() .into_iter()
.map(|s| PortForwardConfig::from_notation(&s)) .map(|s| PortForwardConfig::from_notation(&s, DEFAULT_PORT_FORWARD_SOURCE))
.collect(); .collect();
let port_forwards: Vec<PortForwardConfig> = port_forwards let port_forwards: Vec<PortForwardConfig> = port_forwards
.with_context(|| "Failed to parse port forward config")? .context("Failed to parse port forward config")?
.into_iter() .into_iter()
.flatten() .flatten()
.collect(); .collect();
// Read source-peer-ip
let source_peer_ip = parse_ip(matches.get_one::<String>("source-peer-ip"))
.context("Invalid source peer IP")?;
// Combined `remote` arg and `ONETUN_REMOTE_PORT_FORWARD_#` envs
let mut port_forward_strings = HashSet::new();
if let Some(values) = matches.get_many::<String>("remote") {
for value in values {
port_forward_strings.insert(value.to_owned());
}
}
for n in 1.. {
if let Ok(env) = std::env::var(format!("ONETUN_REMOTE_PORT_FORWARD_{}", n)) {
port_forward_strings.insert(env);
} else {
break;
}
}
// Parse `PORT_FORWARD` strings into `PortForwardConfig`
let remote_port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> =
port_forward_strings
.into_iter()
.map(|s| {
PortForwardConfig::from_notation(
&s,
matches.get_one::<String>("source-peer-ip").unwrap(),
)
})
.collect();
let mut remote_port_forwards: Vec<PortForwardConfig> = remote_port_forwards
.context("Failed to parse remote port forward config")?
.into_iter()
.flatten()
.collect();
for port_forward in remote_port_forwards.iter_mut() {
if port_forward.source.ip() != source_peer_ip {
bail!("Remote port forward config <src_host> must match --source-peer-ip ({}), or be omitted.", source_peer_ip);
}
port_forward.source = SocketAddr::from((source_peer_ip, port_forward.source.port()));
port_forward.remote = true;
}
if port_forwards.is_empty() && remote_port_forwards.is_empty() {
bail!("No port forward configurations given.");
}
// Read private key from file or CLI argument // Read private key from file or CLI argument
let (group_readable, world_readable) = matches let (group_readable, world_readable) = matches
.value_of("private-key-file") .get_one::<String>("private-key-file")
.map(is_file_insecurely_readable) .and_then(is_file_insecurely_readable)
.flatten()
.unwrap_or_default(); .unwrap_or_default();
if group_readable { if group_readable {
warnings.push("Private key file is group-readable. This is insecure.".into()); warnings.push("Private key file is group-readable. This is insecure.".into());
@ -141,71 +224,116 @@ impl Config {
warnings.push("Private key file is world-readable. This is insecure.".into()); warnings.push("Private key file is world-readable. This is insecure.".into());
} }
let private_key = if let Some(private_key_file) = matches.value_of("private-key-file") { let private_key = if let Some(private_key_file) =
matches.get_one::<String>("private-key-file")
{
read_to_string(private_key_file) read_to_string(private_key_file)
.map(|s| s.trim().to_string()) .map(|s| s.trim().to_string())
.with_context(|| "Failed to read private key file") .context("Failed to read private key file")
} else { } else {
if std::env::var("ONETUN_PRIVATE_KEY").is_err() { if std::env::var("ONETUN_PRIVATE_KEY").is_err() {
warnings.push("Private key was passed using CLI. This is insecure. \ warnings.push("Private key was passed using CLI. This is insecure. \
Use \"--private-key-file <file containing private key>\", or the \"ONETUN_PRIVATE_KEY\" env variable instead.".into()); Use \"--private-key-file <file containing private key>\", or the \"ONETUN_PRIVATE_KEY\" env variable instead.".into());
} }
matches matches
.value_of("private-key") .get_one::<String>("private-key")
.map(String::from) .cloned()
.with_context(|| "Missing private key") .context("Missing private key")
}?; }?;
let endpoint_addr = parse_addr(matches.get_one::<String>("endpoint-addr"))
.context("Invalid endpoint address")?;
let endpoint_bind_addr = if let Some(addr) = matches.get_one::<String>("endpoint-bind-addr")
{
let addr = parse_addr(Some(addr)).context("Invalid bind address")?;
// Make sure the bind address and endpoint address are the same IP version
if addr.ip().is_ipv4() != endpoint_addr.ip().is_ipv4() {
bail!("Endpoint and bind addresses must be the same IP version");
}
addr
} else {
// Return the IP version of the endpoint address
match endpoint_addr {
SocketAddr::V4(_) => parse_addr(Some("0.0.0.0:0"))?,
SocketAddr::V6(_) => parse_addr(Some("[::]:0"))?,
}
};
Ok(Self { Ok(Self {
port_forwards, port_forwards,
private_key: Arc::new( remote_port_forwards,
parse_private_key(&private_key).with_context(|| "Invalid private key")?, private_key: Arc::new(parse_private_key(&private_key).context("Invalid private key")?),
),
endpoint_public_key: Arc::new( endpoint_public_key: Arc::new(
parse_public_key(matches.value_of("endpoint-public-key")) parse_public_key(matches.get_one::<String>("endpoint-public-key"))
.with_context(|| "Invalid endpoint public key")?, .context("Invalid endpoint public key")?,
), ),
endpoint_addr: parse_addr(matches.value_of("endpoint-addr")) preshared_key: parse_preshared_key(matches.get_one::<String>("preshared-key"))?,
.with_context(|| "Invalid endpoint address")?, endpoint_addr,
source_peer_ip: parse_ip(matches.value_of("source-peer-ip")) endpoint_bind_addr,
.with_context(|| "Invalid source peer IP")?, source_peer_ip,
keepalive_seconds: parse_keep_alive(matches.value_of("keep-alive")) keepalive_seconds: parse_keep_alive(matches.get_one::<String>("keep-alive"))
.with_context(|| "Invalid keep-alive value")?, .context("Invalid keep-alive value")?,
max_transmission_unit: parse_mtu(matches.value_of("max-transmission-unit")) max_transmission_unit: parse_mtu(matches.get_one::<String>("max-transmission-unit"))
.with_context(|| "Invalid max-transmission-unit value")?, .context("Invalid max-transmission-unit value")?,
log: matches.value_of("log").unwrap_or_default().into(), log: matches
.get_one::<String>("log")
.cloned()
.unwrap_or_default(),
pcap_file: matches.get_one::<String>("pcap").cloned(),
warnings, warnings,
}) })
} }
} }
fn parse_addr(s: Option<&str>) -> anyhow::Result<SocketAddr> { fn parse_addr<T: AsRef<str>>(s: Option<T>) -> anyhow::Result<SocketAddr> {
s.with_context(|| "Missing address")? s.context("Missing address")?
.as_ref()
.to_socket_addrs() .to_socket_addrs()
.with_context(|| "Invalid address")? .context("Invalid address")?
.next() .next()
.with_context(|| "Could not lookup address") .context("Could not lookup address")
} }
fn parse_ip(s: Option<&str>) -> anyhow::Result<IpAddr> { fn parse_ip(s: Option<&String>) -> anyhow::Result<IpAddr> {
s.with_context(|| "Missing IP")? s.context("Missing IP address")?
.parse::<IpAddr>() .parse::<IpAddr>()
.with_context(|| "Invalid IP address") .context("Invalid IP address")
} }
fn parse_private_key(s: &str) -> anyhow::Result<X25519SecretKey> { fn parse_private_key(s: &str) -> anyhow::Result<StaticSecret> {
s.parse::<X25519SecretKey>() let decoded = base64::decode(s).context("Failed to decode private key")?;
.map_err(|e| anyhow::anyhow!("{}", e)) if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(StaticSecret::from(bytes))
} else {
bail!("Invalid private key")
}
} }
fn parse_public_key(s: Option<&str>) -> anyhow::Result<X25519PublicKey> { fn parse_public_key(s: Option<&String>) -> anyhow::Result<PublicKey> {
s.with_context(|| "Missing public key")? let encoded = s.context("Missing public key")?;
.parse::<X25519PublicKey>() let decoded = base64::decode(encoded).context("Failed to decode public key")?;
.map_err(|e| anyhow::anyhow!("{}", e)) if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
.with_context(|| "Invalid public key") Ok(PublicKey::from(bytes))
} else {
bail!("Invalid public key")
}
} }
fn parse_keep_alive(s: Option<&str>) -> anyhow::Result<Option<u16>> { fn parse_preshared_key(s: Option<&String>) -> anyhow::Result<Option<[u8; 32]>> {
if let Some(s) = s {
let decoded = base64::decode(s).context("Failed to decode preshared key")?;
if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(Some(bytes))
} else {
bail!("Invalid preshared key")
}
} else {
Ok(None)
}
}
fn parse_keep_alive(s: Option<&String>) -> anyhow::Result<Option<u16>> {
if let Some(s) = s { if let Some(s) = s {
let parsed: u16 = s.parse().with_context(|| { let parsed: u16 = s.parse().with_context(|| {
format!( format!(
@ -219,23 +347,21 @@ fn parse_keep_alive(s: Option<&str>) -> anyhow::Result<Option<u16>> {
} }
} }
fn parse_mtu(s: Option<&str>) -> anyhow::Result<usize> { fn parse_mtu(s: Option<&String>) -> anyhow::Result<usize> {
s.with_context(|| "Missing MTU")? s.context("Missing MTU")?.parse().context("Invalid MTU")
.parse()
.with_context(|| "Invalid MTU")
} }
#[cfg(unix)] #[cfg(unix)]
fn is_file_insecurely_readable(path: &str) -> Option<(bool, bool)> { fn is_file_insecurely_readable(path: &String) -> Option<(bool, bool)> {
use std::fs::File; use std::fs::File;
use std::os::unix::fs::MetadataExt; use std::os::unix::fs::MetadataExt;
let mode = File::open(&path).ok()?.metadata().ok()?.mode(); let mode = File::open(path).ok()?.metadata().ok()?.mode();
Some((mode & 0o40 > 0, mode & 0o4 > 0)) Some((mode & 0o40 > 0, mode & 0o4 > 0))
} }
#[cfg(not(unix))] #[cfg(not(unix))]
fn is_file_insecurely_readable(path: &str) -> Option<(bool, bool)> { fn is_file_insecurely_readable(_path: &String) -> Option<(bool, bool)> {
// No good way to determine permissions on non-Unix target // No good way to determine permissions on non-Unix target
None None
} }
@ -248,6 +374,8 @@ pub struct PortForwardConfig {
pub destination: SocketAddr, pub destination: SocketAddr,
/// The transport protocol to use for the port (Layer 4). /// The transport protocol to use for the port (Layer 4).
pub protocol: PortProtocol, pub protocol: PortProtocol,
/// Whether this is a remote port forward.
pub remote: bool,
} }
impl PortForwardConfig { impl PortForwardConfig {
@ -270,7 +398,7 @@ impl PortForwardConfig {
/// - IPv6 addresses must be prefixed with `[` and suffixed with `]`. Example: `[::1]`. /// - IPv6 addresses must be prefixed with `[` and suffixed with `]`. Example: `[::1]`.
/// - Any `u16` is accepted as `src_port` and `dst_port` /// - Any `u16` is accepted as `src_port` and `dst_port`
/// - Specifying protocols (`PROTO1,PROTO2,...`) is optional and defaults to `TCP`. Values must be separated by commas. /// - Specifying protocols (`PROTO1,PROTO2,...`) is optional and defaults to `TCP`. Values must be separated by commas.
pub fn from_notation(s: &str) -> anyhow::Result<Vec<PortForwardConfig>> { pub fn from_notation(s: &str, default_source: &str) -> anyhow::Result<Vec<PortForwardConfig>> {
mod parsers { mod parsers {
use nom::branch::alt; use nom::branch::alt;
use nom::bytes::complete::is_not; use nom::bytes::complete::is_not;
@ -348,28 +476,22 @@ impl PortForwardConfig {
.1; .1;
let source = ( let source = (
src_addr.0.unwrap_or("127.0.0.1"), src_addr.0.unwrap_or(default_source),
src_addr src_addr.1.parse::<u16>().context("Invalid source port")?,
.1
.parse::<u16>()
.with_context(|| "Invalid source port")?,
) )
.to_socket_addrs() .to_socket_addrs()
.with_context(|| "Invalid source address")? .context("Invalid source address")?
.next() .next()
.with_context(|| "Could not resolve source address")?; .context("Could not resolve source address")?;
let destination = ( let destination = (
dst_addr.0, dst_addr.0,
dst_addr dst_addr.1.parse::<u16>().context("Invalid source port")?,
.1
.parse::<u16>()
.with_context(|| "Invalid source port")?,
) )
.to_socket_addrs() // TODO: Pass this as given and use DNS config instead (issue #15) .to_socket_addrs() // TODO: Pass this as given and use DNS config instead (issue #15)
.with_context(|| "Invalid destination address")? .context("Invalid destination address")?
.next() .next()
.with_context(|| "Could not resolve destination address")?; .context("Could not resolve destination address")?;
// Parse protocols // Parse protocols
let protocols = if let Some(protocols) = protocols { let protocols = if let Some(protocols) = protocols {
@ -379,7 +501,7 @@ impl PortForwardConfig {
} else { } else {
Ok(vec![PortProtocol::Tcp]) Ok(vec![PortProtocol::Tcp])
} }
.with_context(|| "Failed to parse protocols")?; .context("Failed to parse protocols")?;
// Returns an config for each protocol // Returns an config for each protocol
Ok(protocols Ok(protocols
@ -388,6 +510,7 @@ impl PortForwardConfig {
source, source,
destination, destination,
protocol, protocol,
remote: false,
}) })
.collect()) .collect())
} }
@ -395,13 +518,24 @@ impl PortForwardConfig {
impl Display for PortForwardConfig { impl Display for PortForwardConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}:{}:{}", self.source, self.destination, self.protocol) if self.remote {
write!(
f,
"(remote){}:{}:{}",
self.source, self.destination, self.protocol
)
} else {
write!(f, "{}:{}:{}", self.source, self.destination, self.protocol)
}
} }
} }
/// Layer 7 protocols for ports.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)] #[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub enum PortProtocol { pub enum PortProtocol {
/// TCP
Tcp, Tcp,
/// UDP
Udp, Udp,
} }
@ -440,18 +574,23 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_1() { fn test_parse_port_forward_config_1() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("192.168.0.1:8080:192.168.4.1:8081:TCP,UDP") PortForwardConfig::from_notation(
.expect("Failed to parse"), "192.168.0.1:8080:192.168.4.1:8081:TCP,UDP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![ vec![
PortForwardConfig { PortForwardConfig {
source: SocketAddr::from_str("192.168.0.1:8080").unwrap(), source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}, },
PortForwardConfig { PortForwardConfig {
source: SocketAddr::from_str("192.168.0.1:8080").unwrap(), source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Udp protocol: PortProtocol::Udp,
remote: false,
} }
] ]
); );
@ -460,12 +599,16 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_2() { fn test_parse_port_forward_config_2() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("192.168.0.1:8080:192.168.4.1:8081:TCP") PortForwardConfig::from_notation(
.expect("Failed to parse"), "192.168.0.1:8080:192.168.4.1:8081:TCP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: SocketAddr::from_str("192.168.0.1:8080").unwrap(), source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }
@ -473,12 +616,16 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_3() { fn test_parse_port_forward_config_3() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("0.0.0.0:8080:192.168.4.1:8081") PortForwardConfig::from_notation(
.expect("Failed to parse"), "0.0.0.0:8080:192.168.4.1:8081",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: SocketAddr::from_str("0.0.0.0:8080").unwrap(), source: SocketAddr::from_str("0.0.0.0:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }
@ -486,12 +633,16 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_4() { fn test_parse_port_forward_config_4() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("[::1]:8080:192.168.4.1:8081") PortForwardConfig::from_notation(
.expect("Failed to parse"), "[::1]:8080:192.168.4.1:8081",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: SocketAddr::from_str("[::1]:8080").unwrap(), source: SocketAddr::from_str("[::1]:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }
@ -499,11 +650,13 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_5() { fn test_parse_port_forward_config_5() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("8080:192.168.4.1:8081").expect("Failed to parse"), PortForwardConfig::from_notation("8080:192.168.4.1:8081", DEFAULT_PORT_FORWARD_SOURCE)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: SocketAddr::from_str("127.0.0.1:8080").unwrap(), source: SocketAddr::from_str("127.0.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }
@ -511,11 +664,16 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_6() { fn test_parse_port_forward_config_6() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("8080:192.168.4.1:8081:TCP").expect("Failed to parse"), PortForwardConfig::from_notation(
"8080:192.168.4.1:8081:TCP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: SocketAddr::from_str("127.0.0.1:8080").unwrap(), source: SocketAddr::from_str("127.0.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }
@ -523,12 +681,16 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_7() { fn test_parse_port_forward_config_7() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("localhost:8080:192.168.4.1:8081") PortForwardConfig::from_notation(
.expect("Failed to parse"), "localhost:8080:192.168.4.1:8081",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(), source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(), destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }
@ -536,12 +698,16 @@ mod tests {
#[test] #[test]
fn test_parse_port_forward_config_8() { fn test_parse_port_forward_config_8() {
assert_eq!( assert_eq!(
PortForwardConfig::from_notation("localhost:8080:localhost:8081:TCP") PortForwardConfig::from_notation(
.expect("Failed to parse"), "localhost:8080:localhost:8081:TCP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig { vec![PortForwardConfig {
source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(), source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(),
destination: "localhost:8081".to_socket_addrs().unwrap().next().unwrap(), destination: "localhost:8081".to_socket_addrs().unwrap().next().unwrap(),
protocol: PortProtocol::Tcp protocol: PortProtocol::Tcp,
remote: false,
}] }]
); );
} }

190
src/events.rs Normal file
View file

@ -0,0 +1,190 @@
use bytes::Bytes;
use std::fmt::{Display, Formatter};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use crate::config::PortForwardConfig;
use crate::virtual_iface::VirtualPort;
use crate::PortProtocol;
/// Events that go on the bus between the local server, smoltcp, and WireGuard.
#[derive(Debug, Clone)]
pub enum Event {
    /// Dumb event with no data; a no-op message (exercised by the unit tests below).
    Dumb,
    /// A new connection with the local server was initiated, and the given virtual port was assigned.
    ClientConnectionInitiated(PortForwardConfig, VirtualPort),
    /// A connection was dropped from the pool and should be closed in all interfaces.
    ClientConnectionDropped(VirtualPort),
    /// Data received by the local server that should be sent to the virtual server.
    LocalData(PortForwardConfig, VirtualPort, Bytes),
    /// Data received by the remote server that should be sent to the local client.
    RemoteData(VirtualPort, Bytes),
    /// IP packet received from the WireGuard tunnel that should be passed through the corresponding virtual device.
    InboundInternetPacket(PortProtocol, Bytes),
    /// IP packet to be sent through the WireGuard tunnel as crafted by the virtual device.
    OutboundInternetPacket(Bytes),
    /// Notifies that a virtual device read an IP packet.
    VirtualDeviceFed(PortProtocol),
}
impl Display for Event {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Event::Dumb => {
write!(f, "Dumb{{}}")
}
Event::ClientConnectionInitiated(pf, vp) => {
write!(f, "ClientConnectionInitiated{{ pf={} vp={} }}", pf, vp)
}
Event::ClientConnectionDropped(vp) => {
write!(f, "ClientConnectionDropped{{ vp={} }}", vp)
}
Event::LocalData(pf, vp, data) => {
let size = data.len();
write!(f, "LocalData{{ pf={} vp={} size={} }}", pf, vp, size)
}
Event::RemoteData(vp, data) => {
let size = data.len();
write!(f, "RemoteData{{ vp={} size={} }}", vp, size)
}
Event::InboundInternetPacket(proto, data) => {
let size = data.len();
write!(
f,
"InboundInternetPacket{{ proto={} size={} }}",
proto, size
)
}
Event::OutboundInternetPacket(data) => {
let size = data.len();
write!(f, "OutboundInternetPacket{{ size={} }}", size)
}
Event::VirtualDeviceFed(proto) => {
write!(f, "VirtualDeviceFed{{ proto={} }}", proto)
}
}
}
}
/// A cloneable broadcast event bus connecting the onetun tasks.
#[derive(Clone)]
pub struct Bus {
    // Monotonic counter used to hand out unique sequential endpoint IDs.
    counter: Arc<AtomicU32>,
    // Underlying broadcast channel; each event is tagged with the sending endpoint's ID
    // so that an endpoint can filter out its own messages.
    bus: Arc<tokio::sync::broadcast::Sender<(u32, Event)>>,
}
impl Bus {
    /// Creates a new event bus backed by a bounded broadcast channel.
    pub fn new() -> Self {
        let (sender, _receiver) = tokio::sync::broadcast::channel(1000);
        Self {
            bus: Arc::new(sender),
            counter: Arc::new(AtomicU32::default()),
        }
    }

    /// Registers a new endpoint on the bus with a unique sequential ID.
    pub fn new_endpoint(&self) -> BusEndpoint {
        let endpoint_id = self.counter.fetch_add(1, Ordering::Relaxed);
        let receiver = self.bus.subscribe();
        let sender = BusSender {
            id: endpoint_id,
            tx: self.bus.as_ref().clone(),
        };
        BusEndpoint {
            id: endpoint_id,
            tx: sender,
            rx: receiver,
        }
    }
}
impl Default for Bus {
    /// Equivalent to [`Bus::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// A unique endpoint on the event bus: can publish events and receive events
/// published by other endpoints (never its own).
pub struct BusEndpoint {
    // Unique sequential ID assigned by the bus at creation.
    id: u32,
    // Sending half, tagged with this endpoint's ID.
    tx: BusSender,
    // Receiving half of the broadcast channel.
    rx: tokio::sync::broadcast::Receiver<(u32, Event)>,
}
impl BusEndpoint {
/// Sends the event on the bus. Note that the messages sent by this endpoint won't reach itself.
pub fn send(&self, event: Event) {
self.tx.send(event)
}
/// Returns the unique sequential ID of this endpoint.
pub fn id(&self) -> u32 {
self.id
}
/// Awaits the next `Event` on the bus to be read.
pub async fn recv(&mut self) -> Event {
loop {
match self.rx.recv().await {
Ok((id, event)) => {
if id == self.id {
// If the event was sent by this endpoint, it is skipped
continue;
} else {
return event;
}
}
Err(_) => {
error!("Failed to read event bus from endpoint #{}", self.id);
return futures::future::pending().await;
}
}
}
}
/// Creates a new sender for this endpoint that can be cloned.
pub fn sender(&self) -> BusSender {
self.tx.clone()
}
}
/// A cloneable sending handle for a [`BusEndpoint`].
#[derive(Clone)]
pub struct BusSender {
    // ID of the endpoint this sender belongs to; attached to every published event.
    id: u32,
    tx: tokio::sync::broadcast::Sender<(u32, Event)>,
}
impl BusSender {
/// Sends the event on the bus. Note that the messages sent by this endpoint won't reach itself.
pub fn send(&self, event: Event) {
trace!("#{} -> {}", self.id, event);
match self.tx.send((self.id, event)) {
Ok(_) => {}
Err(_) => error!("Failed to send event to bus from endpoint #{}", self.id),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Verifies that endpoint IDs are sequential and that an event published by
    // one endpoint reaches every other endpoint but never its own sender.
    #[tokio::test]
    async fn test_bus() {
        let bus = Bus::new();
        let mut endpoint_1 = bus.new_endpoint();
        let mut endpoint_2 = bus.new_endpoint();
        let mut endpoint_3 = bus.new_endpoint();
        // IDs are assigned sequentially starting at 0.
        assert_eq!(endpoint_1.id(), 0);
        assert_eq!(endpoint_2.id(), 1);
        assert_eq!(endpoint_3.id(), 2);
        // An event sent by endpoint 1 is delivered to endpoints 2 and 3.
        endpoint_1.send(Event::Dumb);
        let recv_2 = endpoint_2.recv().await;
        let recv_3 = endpoint_3.recv().await;
        assert!(matches!(recv_2, Event::Dumb));
        assert!(matches!(recv_3, Event::Dumb));
        // Likewise, endpoint 2's event reaches endpoints 1 and 3 (and skips itself).
        endpoint_2.send(Event::Dumb);
        let recv_1 = endpoint_1.recv().await;
        let recv_3 = endpoint_3.recv().await;
        assert!(matches!(recv_1, Event::Dumb));
        assert!(matches!(recv_3, Event::Dumb));
    }
}

View file

@ -1,35 +0,0 @@
use crate::virtual_device::VirtualIpDevice;
use crate::wg::WireGuardTunnel;
use smoltcp::iface::InterfaceBuilder;
use std::sync::Arc;
use tokio::time::Duration;
/// A repeating task that processes unroutable IP packets.
pub async fn run_ip_sink_interface(wg: Arc<WireGuardTunnel>) -> ! {
    // Build the sink virtual device; without it there is nowhere to drain
    // unroutable packets, so failure here is fatal.
    let sink_device = VirtualIpDevice::new_sink(wg)
        .await
        .expect("Failed to initialize VirtualIpDevice for sink interface");

    // The sink interface carries no sockets and no IP addresses.
    let mut no_sockets: [_; 0] = Default::default();
    let mut iface = InterfaceBuilder::new(sink_device, &mut no_sockets[..])
        .ip_addrs([])
        .finalize();

    loop {
        let now = smoltcp::time::Instant::now();
        match iface.poll(now) {
            Ok(true) => {
                trace!("[SINK] Virtual interface polled some packets to be processed");
                tokio::time::sleep(Duration::from_millis(1)).await;
            }
            Ok(false) => {
                // Nothing to do; back off a little longer before polling again.
                tokio::time::sleep(Duration::from_millis(5)).await;
            }
            Err(e) => {
                error!("[SINK] Virtual interface poll error: {:?}", e);
            }
        }
    }
}

122
src/lib.rs Normal file
View file

@ -0,0 +1,122 @@
#[macro_use]
extern crate log;
use std::sync::Arc;
use anyhow::Context;
use crate::config::{Config, PortProtocol};
use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::tcp::TcpVirtualInterface;
use crate::virtual_iface::udp::UdpVirtualInterface;
use crate::virtual_iface::VirtualInterfacePoll;
use crate::wg::WireGuardTunnel;
pub mod config;
pub mod events;
#[cfg(feature = "pcap")]
pub mod pcap;
pub mod tunnel;
pub mod virtual_device;
pub mod virtual_iface;
pub mod wg;
/// Starts the onetun tunnels in separate tokio tasks.
///
/// Note: This future completes immediately.
pub async fn start_tunnels(config: Config, bus: Bus) -> anyhow::Result<()> {
    // Initialize the port pool for each protocol
    let tcp_port_pool = TcpPortPool::new();
    let udp_port_pool = UdpPortPool::new();
    #[cfg(feature = "pcap")]
    if let Some(pcap_file) = config.pcap_file.clone() {
        // Start packet capture
        let bus = bus.clone();
        tokio::spawn(async move { pcap::capture(pcap_file, bus).await });
    }
    // The WireGuard tunnel must be up before any virtual interface or
    // port-forward task is spawned, since they all communicate through it.
    let wg = WireGuardTunnel::new(&config, bus.clone())
        .await
        .context("Failed to initialize WireGuard tunnel")?;
    let wg = Arc::new(wg);
    {
        // Start routine task for WireGuard
        let wg = wg.clone();
        tokio::spawn(async move { wg.routine_task().await });
    }
    {
        // Start consumption task for WireGuard
        // NOTE(review): this future is boxed while the sibling tasks are not —
        // presumably because it is too large for the spawn machinery; confirm.
        let wg = wg.clone();
        tokio::spawn(Box::pin(async move { wg.consume_task().await }));
    }
    {
        // Start production task for WireGuard
        let wg = wg.clone();
        tokio::spawn(async move { wg.produce_task().await });
    }
    // Only spin up a virtual TCP interface if at least one TCP forward exists.
    if config
        .port_forwards
        .iter()
        .any(|pf| pf.protocol == PortProtocol::Tcp)
    {
        // TCP device
        let bus = bus.clone();
        let device =
            VirtualIpDevice::new(PortProtocol::Tcp, bus.clone(), config.max_transmission_unit);
        // Start TCP Virtual Interface
        let port_forwards = config.port_forwards.clone();
        let iface = TcpVirtualInterface::new(port_forwards, bus, config.source_peer_ip);
        tokio::spawn(async move { iface.poll_loop(device).await });
    }
    // Only spin up a virtual UDP interface if at least one UDP forward exists.
    if config
        .port_forwards
        .iter()
        .any(|pf| pf.protocol == PortProtocol::Udp)
    {
        // UDP device
        let bus = bus.clone();
        let device =
            VirtualIpDevice::new(PortProtocol::Udp, bus.clone(), config.max_transmission_unit);
        // Start UDP Virtual Interface
        let port_forwards = config.port_forwards.clone();
        let iface = UdpVirtualInterface::new(port_forwards, bus, config.source_peer_ip);
        tokio::spawn(async move { iface.poll_loop(device).await });
    }
    {
        // Spawn one independent proxy-server task per port-forward; each task
        // gets its own clones of the shared handles.
        let port_forwards = config.port_forwards;
        let source_peer_ip = config.source_peer_ip;
        port_forwards
            .into_iter()
            .map(|pf| {
                (
                    pf,
                    wg.clone(),
                    tcp_port_pool.clone(),
                    udp_port_pool.clone(),
                    bus.clone(),
                )
            })
            .for_each(move |(pf, wg, tcp_port_pool, udp_port_pool, bus)| {
                tokio::spawn(async move {
                    tunnel::port_forward(pf, source_peer_ip, tcp_port_pool, udp_port_pool, wg, bus)
                        .await
                        .unwrap_or_else(|e| error!("Port-forward failed for {} : {}", pf, e))
                });
            });
    }
    Ok(())
}

View file

@ -1,81 +1,36 @@
#[cfg(feature = "bin")]
#[macro_use] #[macro_use]
extern crate log; extern crate log;
use std::sync::Arc; #[cfg(feature = "bin")]
use anyhow::Context;
use crate::config::Config;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::wg::WireGuardTunnel;
pub mod config;
pub mod ip_sink;
pub mod tunnel;
pub mod virtual_device;
pub mod virtual_iface;
pub mod wg;
#[tokio::main] #[tokio::main]
async fn main() -> anyhow::Result<()> { async fn main() -> anyhow::Result<()> {
let config = Config::from_args().with_context(|| "Failed to read config")?; use anyhow::Context;
use onetun::{config::Config, events::Bus};
let config = Config::from_args().context("Configuration has errors")?;
init_logger(&config)?; init_logger(&config)?;
for warning in &config.warnings { for warning in &config.warnings {
warn!("{}", warning); warn!("{}", warning);
} }
// Initialize the port pool for each protocol let bus = Bus::default();
let tcp_port_pool = TcpPortPool::new(); onetun::start_tunnels(config, bus).await?;
let udp_port_pool = UdpPortPool::new();
let wg = WireGuardTunnel::new(&config)
.await
.with_context(|| "Failed to initialize WireGuard tunnel")?;
let wg = Arc::new(wg);
{
// Start routine task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.routine_task().await });
}
{
// Start consumption task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.consume_task().await });
}
{
// Start IP sink task for incoming IP packets
let wg = wg.clone();
tokio::spawn(async move { ip_sink::run_ip_sink_interface(wg).await });
}
{
let port_forwards = config.port_forwards;
let source_peer_ip = config.source_peer_ip;
port_forwards
.into_iter()
.map(|pf| (pf, wg.clone(), tcp_port_pool.clone(), udp_port_pool.clone()))
.for_each(move |(pf, wg, tcp_port_pool, udp_port_pool)| {
tokio::spawn(async move {
tunnel::port_forward(pf, source_peer_ip, tcp_port_pool, udp_port_pool, wg)
.await
.unwrap_or_else(|e| error!("Port-forward failed for {} : {}", pf, e))
});
});
}
futures::future::pending().await futures::future::pending().await
} }
fn init_logger(config: &Config) -> anyhow::Result<()> { #[cfg(not(feature = "bin"))]
let mut builder = pretty_env_logger::formatted_builder(); fn main() -> anyhow::Result<()> {
builder.parse_filters(&config.log); Err(anyhow::anyhow!("Binary compiled without 'bin' feature"))
builder }
.try_init()
.with_context(|| "Failed to initialize logger") #[cfg(feature = "bin")]
fn init_logger(config: &onetun::config::Config) -> anyhow::Result<()> {
use anyhow::Context;
let mut builder = pretty_env_logger::formatted_timed_builder();
builder.parse_filters(&config.log);
builder.try_init().context("Failed to initialize logger")
} }

113
src/pcap.rs Normal file
View file

@ -0,0 +1,113 @@
use crate::events::Event;
use crate::Bus;
use anyhow::Context;
use smoltcp::time::Instant;
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
// Async writer for the libpcap capture file format.
struct Pcap {
    // Buffered async file handle the capture records are written to.
    writer: BufWriter<File>,
}
/// libpcap file writer
/// This is mostly taken from `smoltcp`, but rewritten to be async.
impl Pcap {
    /// Flushes the buffered writer to the underlying pcap file.
    async fn flush(&mut self) -> anyhow::Result<()> {
        self.writer
            .flush()
            .await
            .context("Failed to flush pcap writer")
    }

    /// Writes the entire buffer to the pcap file.
    ///
    /// Uses `write_all` so a partial write can never silently truncate a
    /// record (`write` may write fewer bytes than requested, and the callers
    /// discard the returned length). Returns `data.len()` on success to keep
    /// the original signature.
    async fn write(&mut self, data: &[u8]) -> anyhow::Result<usize> {
        self.writer
            .write_all(data)
            .await
            .with_context(|| format!("Failed to write {} bytes to pcap writer", data.len()))?;
        Ok(data.len())
    }

    /// Writes a big-endian u16 to the pcap file.
    async fn write_u16(&mut self, value: u16) -> anyhow::Result<()> {
        self.writer
            .write_u16(value)
            .await
            .context("Failed to write u16 to pcap writer")
    }

    /// Writes a big-endian u32 to the pcap file.
    async fn write_u32(&mut self, value: u32) -> anyhow::Result<()> {
        self.writer
            .write_u32(value)
            .await
            .context("Failed to write u32 to pcap writer")
    }

    /// Writes the libpcap global (file) header and flushes it.
    async fn global_header(&mut self) -> anyhow::Result<()> {
        self.write_u32(0xa1b2c3d4).await?; // magic number
        self.write_u16(2).await?; // major version
        self.write_u16(4).await?; // minor version
        self.write_u32(0).await?; // timezone (= UTC)
        self.write_u32(0).await?; // accuracy (not used)
        self.write_u32(65535).await?; // maximum packet length
        self.write_u32(101).await?; // link-layer header type (101 = IP)
        self.flush().await
    }

    /// Writes the per-packet record header (timestamps and lengths).
    ///
    /// Panics if `length` exceeds 65535, the maximum packet length declared in
    /// the global header.
    async fn packet_header(&mut self, timestamp: Instant, length: usize) -> anyhow::Result<()> {
        assert!(length <= 65535);
        self.write_u32(timestamp.secs() as u32).await?; // timestamp seconds
        self.write_u32(timestamp.micros() as u32).await?; // timestamp microseconds
        self.write_u32(length as u32).await?; // captured length
        self.write_u32(length as u32).await?; // original length
        Ok(())
    }

    /// Writes one captured packet (record header + payload) and flushes it.
    async fn packet(&mut self, timestamp: Instant, packet: &[u8]) -> anyhow::Result<()> {
        self.packet_header(timestamp, packet.len())
            .await
            .context("Failed to write packet header to pcap writer")?;
        self.write(packet)
            .await
            .context("Failed to write packet to pcap writer")?;
        // A single flush suffices; the previous version flushed twice in a row.
        self.flush().await
    }
}
/// Listens on the event bus for IP packets sent from and to the WireGuard tunnel.
pub async fn capture(pcap_file: String, bus: Bus) -> anyhow::Result<()> {
    let mut endpoint = bus.new_endpoint();

    // Open the capture file and wrap it in a buffered pcap writer.
    let mut pcap = Pcap {
        writer: BufWriter::new(
            File::create(&pcap_file)
                .await
                .context("Failed to create pcap file")?,
        ),
    };
    pcap.global_header()
        .await
        .context("Failed to write global header to pcap writer")?;
    info!("Capturing WireGuard IP packets to {}", &pcap_file);

    // Record every IP packet crossing the tunnel, in either direction;
    // all other bus events are ignored.
    loop {
        let event = endpoint.recv().await;
        if let Event::InboundInternetPacket(_proto, ip) = event {
            pcap.packet(Instant::now(), &ip)
                .await
                .context("Failed to write inbound IP packet to pcap writer")?;
        } else if let Event::OutboundInternetPacket(ip) = event {
            pcap.packet(Instant::now(), &ip)
                .await
                .context("Failed to write output IP packet to pcap writer")?;
        }
    }
}

View file

@ -2,12 +2,12 @@ use std::net::IpAddr;
use std::sync::Arc; use std::sync::Arc;
use crate::config::{PortForwardConfig, PortProtocol}; use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool; use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool; use crate::tunnel::udp::UdpPortPool;
use crate::wg::WireGuardTunnel; use crate::wg::WireGuardTunnel;
pub mod tcp; pub mod tcp;
#[allow(unused)]
pub mod udp; pub mod udp;
pub async fn port_forward( pub async fn port_forward(
@ -16,6 +16,7 @@ pub async fn port_forward(
tcp_port_pool: TcpPortPool, tcp_port_pool: TcpPortPool,
udp_port_pool: UdpPortPool, udp_port_pool: UdpPortPool,
wg: Arc<WireGuardTunnel>, wg: Arc<WireGuardTunnel>,
bus: Bus,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
info!( info!(
"Tunneling {} [{}]->[{}] (via [{}] as peer {})", "Tunneling {} [{}]->[{}] (via [{}] as peer {})",
@ -27,7 +28,7 @@ pub async fn port_forward(
); );
match port_forward.protocol { match port_forward.protocol {
PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, wg).await, PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, bus).await,
PortProtocol::Udp => udp::udp_proxy_server(port_forward, udp_port_pool, wg).await, PortProtocol::Udp => udp::udp_proxy_server(port_forward, udp_port_pool, bus).await,
} }
} }

View file

@ -1,17 +1,18 @@
use crate::config::{PortForwardConfig, PortProtocol};
use crate::virtual_iface::tcp::TcpVirtualInterface;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::wg::WireGuardTunnel;
use anyhow::Context;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::net::{TcpListener, TcpStream};
use std::ops::Range; use std::ops::Range;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use bytes::BytesMut;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use rand::thread_rng; use rand::thread_rng;
use tokio::io::AsyncWriteExt;
use tokio::net::{TcpListener, TcpStream};
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::{Bus, Event};
use crate::virtual_iface::VirtualPort;
const MAX_PACKET: usize = 65536; const MAX_PACKET: usize = 65536;
const MIN_PORT: u16 = 1000; const MIN_PORT: u16 = 1000;
@ -22,19 +23,18 @@ const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
pub async fn tcp_proxy_server( pub async fn tcp_proxy_server(
port_forward: PortForwardConfig, port_forward: PortForwardConfig,
port_pool: TcpPortPool, port_pool: TcpPortPool,
wg: Arc<WireGuardTunnel>, bus: Bus,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
let listener = TcpListener::bind(port_forward.source) let listener = TcpListener::bind(port_forward.source)
.await .await
.with_context(|| "Failed to listen on TCP proxy server")?; .context("Failed to listen on TCP proxy server")?;
loop { loop {
let wg = wg.clone();
let port_pool = port_pool.clone(); let port_pool = port_pool.clone();
let (socket, peer_addr) = listener let (socket, peer_addr) = listener
.accept() .accept()
.await .await
.with_context(|| "Failed to accept connection on TCP proxy server")?; .context("Failed to accept connection on TCP proxy server")?;
// Assign a 'virtual port': this is a unique port number used to route IP packets // Assign a 'virtual port': this is a unique port number used to route IP packets
// received from the WireGuard tunnel. It is the port number that the virtual client will // received from the WireGuard tunnel. It is the port number that the virtual client will
@ -52,10 +52,10 @@ pub async fn tcp_proxy_server(
info!("[{}] Incoming connection from {}", virtual_port, peer_addr); info!("[{}] Incoming connection from {}", virtual_port, peer_addr);
let bus = bus.clone();
tokio::spawn(async move { tokio::spawn(async move {
let port_pool = port_pool.clone(); let port_pool = port_pool.clone();
let result = let result = handle_tcp_proxy_connection(socket, virtual_port, port_forward, bus).await;
handle_tcp_proxy_connection(socket, virtual_port, port_forward, wg.clone()).await;
if let Err(e) = result { if let Err(e) = result {
error!( error!(
@ -66,8 +66,7 @@ pub async fn tcp_proxy_server(
info!("[{}] Connection closed by client", virtual_port); info!("[{}] Connection closed by client", virtual_port);
} }
// Release port when connection drops tokio::time::sleep(Duration::from_millis(100)).await; // Make sure the other tasks have time to process the event
wg.release_virtual_interface(VirtualPort(virtual_port, PortProtocol::Tcp));
port_pool.release(virtual_port).await; port_pool.release(virtual_port).await;
}); });
} }
@ -75,72 +74,26 @@ pub async fn tcp_proxy_server(
/// Handles a new TCP connection with its assigned virtual port. /// Handles a new TCP connection with its assigned virtual port.
async fn handle_tcp_proxy_connection( async fn handle_tcp_proxy_connection(
socket: TcpStream, mut socket: TcpStream,
virtual_port: u16, virtual_port: VirtualPort,
port_forward: PortForwardConfig, port_forward: PortForwardConfig,
wg: Arc<WireGuardTunnel>, bus: Bus,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// Abort signal for stopping the Virtual Interface let mut endpoint = bus.new_endpoint();
let abort = Arc::new(AtomicBool::new(false)); endpoint.send(Event::ClientConnectionInitiated(port_forward, virtual_port));
// Signals that the Virtual Client is ready to send data
let (virtual_client_ready_tx, virtual_client_ready_rx) = tokio::sync::oneshot::channel::<()>();
// data_to_real_client_(tx/rx): This task reads the data from this mpsc channel to send back
// to the real client.
let (data_to_real_client_tx, mut data_to_real_client_rx) = tokio::sync::mpsc::channel(1_000);
// data_to_real_server_(tx/rx): This task sends the data received from the real client to the
// virtual interface (virtual server socket).
let (data_to_virtual_server_tx, data_to_virtual_server_rx) = tokio::sync::mpsc::channel(1_000);
// Spawn virtual interface
{
let abort = abort.clone();
let virtual_interface = TcpVirtualInterface::new(
virtual_port,
port_forward,
wg,
abort.clone(),
data_to_real_client_tx,
data_to_virtual_server_rx,
virtual_client_ready_tx,
);
tokio::spawn(async move {
virtual_interface.poll_loop().await.unwrap_or_else(|e| {
error!("Virtual interface poll loop failed unexpectedly: {}", e);
abort.store(true, Ordering::Relaxed);
})
});
}
// Wait for virtual client to be ready.
virtual_client_ready_rx
.await
.with_context(|| "Virtual client dropped before being ready.")?;
trace!("[{}] Virtual client is ready to send data", virtual_port);
let mut buffer = BytesMut::with_capacity(MAX_PACKET);
loop { loop {
tokio::select! { tokio::select! {
readable_result = socket.readable() => { readable_result = socket.readable() => {
match readable_result { match readable_result {
Ok(_) => { Ok(_) => {
// Buffer for the individual TCP segment.
let mut buffer = Vec::with_capacity(MAX_PACKET);
match socket.try_read_buf(&mut buffer) { match socket.try_read_buf(&mut buffer) {
Ok(size) if size > 0 => { Ok(size) if size > 0 => {
let data = &buffer[..size]; let data = Vec::from(&buffer[..size]);
debug!( endpoint.send(Event::LocalData(port_forward, virtual_port, data.into()));
"[{}] Read {} bytes of TCP data from real client", // Reset buffer
virtual_port, size buffer.clear();
);
if let Err(e) = data_to_virtual_server_tx.send(data.to_vec()).await {
error!(
"[{}] Failed to dispatch data to virtual interface: {:?}",
virtual_port, e
);
}
} }
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => { Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue; continue;
@ -163,43 +116,47 @@ async fn handle_tcp_proxy_connection(
} }
} }
} }
data_recv_result = data_to_real_client_rx.recv() => { event = endpoint.recv() => {
match data_recv_result { match event {
Some(data) => match socket.try_write(&data) { Event::ClientConnectionDropped(e_vp) if e_vp == virtual_port => {
Ok(size) => { // This connection is supposed to be closed, stop the task.
debug!( break;
"[{}] Wrote {} bytes of TCP data to real client", }
virtual_port, size Event::RemoteData(e_vp, data) if e_vp == virtual_port => {
); // Have remote data to send to the local client
if let Err(e) = socket.writable().await {
error!("[{}] Failed to check if writable: {:?}", virtual_port, e);
} }
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => { let expected = data.len();
if abort.load(Ordering::Relaxed) { let mut sent = 0;
loop {
if sent >= expected {
break; break;
} else { }
continue; match socket.write(&data[sent..expected]).await {
Ok(written) => {
debug!("[{}] Sent {} (expected {}) bytes to local client", virtual_port, written, expected);
sent += written;
if sent < expected {
debug!("[{}] Will try to resend remaining {} bytes to local client", virtual_port, (expected - written));
}
},
Err(e) => {
error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, expected, e);
break;
}
} }
} }
Err(e) => { }
error!( _ => {}
"[{}] Failed to write to client TCP socket: {:?}",
virtual_port, e
);
}
},
None => {
if abort.load(Ordering::Relaxed) {
break;
} else {
continue;
}
},
} }
} }
} }
} }
trace!("[{}] TCP socket handler task terminated", virtual_port); // Notify other endpoints that this task has closed and no more data is to be sent to the local client
abort.store(true, Ordering::Relaxed); endpoint.send(Event::ClientConnectionDropped(virtual_port));
Ok(()) Ok(())
} }
@ -229,20 +186,20 @@ impl TcpPortPool {
} }
} }
/// Requests a free port from the pool. An error is returned if none is available (exhaused max capacity). /// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
pub async fn next(&self) -> anyhow::Result<u16> { pub async fn next(&self) -> anyhow::Result<VirtualPort> {
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
let port = inner let port = inner
.queue .queue
.pop_front() .pop_front()
.with_context(|| "TCP virtual port pool is exhausted")?; .context("TCP virtual port pool is exhausted")?;
Ok(port) Ok(VirtualPort::new(port, PortProtocol::Tcp))
} }
/// Releases a port back into the pool. /// Releases a port back into the pool.
pub async fn release(&self, port: u16) { pub async fn release(&self, port: VirtualPort) {
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
inner.queue.push_back(port); inner.queue.push_back(port.num());
} }
} }

View file

@ -1,21 +1,19 @@
use std::collections::{BTreeMap, HashMap, VecDeque}; use std::collections::{HashMap, VecDeque};
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::ops::Range; use std::ops::Range;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::time::Instant; use std::time::Instant;
use anyhow::Context; use anyhow::Context;
use bytes::Bytes;
use priority_queue::double_priority_queue::DoublePriorityQueue; use priority_queue::double_priority_queue::DoublePriorityQueue;
use priority_queue::priority_queue::PriorityQueue;
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use rand::thread_rng; use rand::thread_rng;
use tokio::net::UdpSocket; use tokio::net::UdpSocket;
use crate::config::{PortForwardConfig, PortProtocol}; use crate::config::{PortForwardConfig, PortProtocol};
use crate::virtual_iface::udp::UdpVirtualInterface; use crate::events::{Bus, Event};
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort}; use crate::virtual_iface::VirtualPort;
use crate::wg::WireGuardTunnel;
const MAX_PACKET: usize = 65536; const MAX_PACKET: usize = 65536;
const MIN_PORT: u16 = 1000; const MIN_PORT: u16 = 1000;
@ -30,61 +28,24 @@ const UDP_TIMEOUT_SECONDS: u64 = 60;
/// TODO: Make this configurable by the CLI /// TODO: Make this configurable by the CLI
const PORTS_PER_IP: usize = 100; const PORTS_PER_IP: usize = 100;
/// Starts the server that listens on UDP datagrams.
pub async fn udp_proxy_server( pub async fn udp_proxy_server(
port_forward: PortForwardConfig, port_forward: PortForwardConfig,
port_pool: UdpPortPool, port_pool: UdpPortPool,
wg: Arc<WireGuardTunnel>, bus: Bus,
) -> anyhow::Result<()> { ) -> anyhow::Result<()> {
// Abort signal let mut endpoint = bus.new_endpoint();
let abort = Arc::new(AtomicBool::new(false));
// data_to_real_client_(tx/rx): This task reads the data from this mpsc channel to send back
// to the real client.
let (data_to_real_client_tx, mut data_to_real_client_rx) =
tokio::sync::mpsc::channel::<(VirtualPort, Vec<u8>)>(1_000);
// data_to_real_server_(tx/rx): This task sends the data received from the real client to the
// virtual interface (virtual server socket).
let (data_to_virtual_server_tx, data_to_virtual_server_rx) =
tokio::sync::mpsc::channel::<(VirtualPort, Vec<u8>)>(1_000);
{
// Spawn virtual interface
// Note: contrary to TCP, there is only one UDP virtual interface
let virtual_interface = UdpVirtualInterface::new(
port_forward,
wg,
data_to_real_client_tx,
data_to_virtual_server_rx,
);
let abort = abort.clone();
tokio::spawn(async move {
virtual_interface.poll_loop().await.unwrap_or_else(|e| {
error!("Virtual interface poll loop failed unexpectedly: {}", e);
abort.store(true, Ordering::Relaxed);
});
});
}
let socket = UdpSocket::bind(port_forward.source) let socket = UdpSocket::bind(port_forward.source)
.await .await
.with_context(|| "Failed to bind on UDP proxy address")?; .context("Failed to bind on UDP proxy address")?;
let mut buffer = [0u8; MAX_PACKET]; let mut buffer = [0u8; MAX_PACKET];
loop { loop {
if abort.load(Ordering::Relaxed) {
break;
}
tokio::select! { tokio::select! {
to_send_result = next_udp_datagram(&socket, &mut buffer, port_pool.clone()) => { to_send_result = next_udp_datagram(&socket, &mut buffer, port_pool.clone()) => {
match to_send_result { match to_send_result {
Ok(Some((port, data))) => { Ok(Some((port, data))) => {
data_to_virtual_server_tx.send((port, data)).await.unwrap_or_else(|e| { endpoint.send(Event::LocalData(port_forward, port, data));
error!(
"Failed to dispatch data to UDP virtual interface: {:?}",
e
);
});
} }
Ok(None) => { Ok(None) => {
continue; continue;
@ -98,18 +59,34 @@ pub async fn udp_proxy_server(
} }
} }
} }
data_recv_result = data_to_real_client_rx.recv() => { event = endpoint.recv() => {
if let Some((port, data)) = data_recv_result { if let Event::RemoteData(virtual_port, data) = event {
if let Some(peer_addr) = port_pool.get_peer_addr(port.0).await { if let Some(peer) = port_pool.get_peer_addr(virtual_port).await {
if let Err(e) = socket.send_to(&data, peer_addr).await { // Have remote data to send to the local client
error!( if let Err(e) = socket.writable().await {
"[{}] Failed to send UDP datagram to real client ({}): {:?}", error!("[{}] Failed to check if writable: {:?}", virtual_port, e);
port,
peer_addr,
e,
);
} }
port_pool.update_last_transmit(port.0).await; let expected = data.len();
let mut sent = 0;
loop {
if sent >= expected {
break;
}
match socket.send_to(&data[sent..expected], peer).await {
Ok(written) => {
debug!("[{}] Sent {} (expected {}) bytes to local client", virtual_port, written, expected);
sent += written;
if sent < expected {
debug!("[{}] Will try to resend remaining {} bytes to local client", virtual_port, (expected - written));
}
},
Err(e) => {
error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, expected, e);
break;
}
}
}
port_pool.update_last_transmit(virtual_port).await;
} }
} }
} }
@ -122,11 +99,11 @@ async fn next_udp_datagram(
socket: &UdpSocket, socket: &UdpSocket,
buffer: &mut [u8], buffer: &mut [u8],
port_pool: UdpPortPool, port_pool: UdpPortPool,
) -> anyhow::Result<Option<(VirtualPort, Vec<u8>)>> { ) -> anyhow::Result<Option<(VirtualPort, Bytes)>> {
let (size, peer_addr) = socket let (size, peer_addr) = socket
.recv_from(buffer) .recv_from(buffer)
.await .await
.with_context(|| "Failed to accept incoming UDP datagram")?; .context("Failed to accept incoming UDP datagram")?;
// Assign a 'virtual port': this is a unique port number used to route IP packets // Assign a 'virtual port': this is a unique port number used to route IP packets
// received from the WireGuard tunnel. It is the port number that the virtual client will // received from the WireGuard tunnel. It is the port number that the virtual client will
@ -141,17 +118,16 @@ async fn next_udp_datagram(
return Ok(None); return Ok(None);
} }
}; };
let port = VirtualPort(port, PortProtocol::Udp);
debug!( debug!(
"[{}] Received datagram of {} bytes from {}", "[{}] Received datagram of {} bytes from {}",
port, size, peer_addr port, size, peer_addr
); );
port_pool.update_last_transmit(port.0).await; port_pool.update_last_transmit(port).await;
let data = buffer[..size].to_vec(); let data = buffer[..size].to_vec();
Ok(Some((port, data))) Ok(Some((port, data.into())))
} }
/// A pool of virtual ports available for TCP connections. /// A pool of virtual ports available for TCP connections.
@ -181,14 +157,14 @@ impl UdpPortPool {
} }
/// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity). /// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
pub async fn next(&self, peer_addr: SocketAddr) -> anyhow::Result<u16> { pub async fn next(&self, peer_addr: SocketAddr) -> anyhow::Result<VirtualPort> {
// A port found to be reused. This is outside of the block because the read lock cannot be upgraded to a write lock. // A port found to be reused. This is outside of the block because the read lock cannot be upgraded to a write lock.
let mut port_reuse: Option<u16> = None; let mut port_reuse: Option<u16> = None;
{ {
let inner = self.inner.read().await; let inner = self.inner.read().await;
if let Some(port) = inner.port_by_peer_addr.get(&peer_addr) { if let Some(port) = inner.port_by_peer_addr.get(&peer_addr) {
return Ok(*port); return Ok(VirtualPort::new(*port, PortProtocol::Udp));
} }
// Count how many ports are being used by the peer IP // Count how many ports are being used by the peer IP
@ -236,30 +212,30 @@ impl UdpPortPool {
None None
} }
}) })
.with_context(|| "virtual port pool is exhausted")?; .context("Virtual port pool is exhausted")?;
inner.port_by_peer_addr.insert(peer_addr, port); inner.port_by_peer_addr.insert(peer_addr, port);
inner.peer_addr_by_port.insert(port, peer_addr); inner.peer_addr_by_port.insert(port, peer_addr);
Ok(port) Ok(VirtualPort::new(port, PortProtocol::Udp))
} }
/// Notify that the given virtual port has received or transmitted a UDP datagram. /// Notify that the given virtual port has received or transmitted a UDP datagram.
pub async fn update_last_transmit(&self, port: u16) { pub async fn update_last_transmit(&self, port: VirtualPort) {
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
if let Some(peer) = inner.peer_addr_by_port.get(&port).copied() { if let Some(peer) = inner.peer_addr_by_port.get(&port.num()).copied() {
let mut pq: &mut DoublePriorityQueue<u16, Instant> = inner let pq: &mut DoublePriorityQueue<u16, Instant> = inner
.peer_port_usage .peer_port_usage
.entry(peer.ip()) .entry(peer.ip())
.or_insert_with(Default::default); .or_insert_with(Default::default);
pq.push(port, Instant::now()); pq.push(port.num(), Instant::now());
} }
let mut pq: &mut DoublePriorityQueue<u16, Instant> = &mut inner.port_usage; let pq: &mut DoublePriorityQueue<u16, Instant> = &mut inner.port_usage;
pq.push(port, Instant::now()); pq.push(port.num(), Instant::now());
} }
pub async fn get_peer_addr(&self, port: u16) -> Option<SocketAddr> { pub async fn get_peer_addr(&self, port: VirtualPort) -> Option<SocketAddr> {
let inner = self.inner.read().await; let inner = self.inner.read().await;
inner.peer_addr_by_port.get(&port).copied() inner.peer_addr_by_port.get(&port.num()).copied()
} }
} }

View file

@ -1,113 +1,136 @@
use crate::virtual_iface::VirtualPort; use crate::config::PortProtocol;
use crate::wg::{WireGuardTunnel, DISPATCH_CAPACITY}; use crate::events::{BusSender, Event};
use anyhow::Context; use crate::Bus;
use smoltcp::phy::{Device, DeviceCapabilities, Medium}; use bytes::{BufMut, Bytes, BytesMut};
use smoltcp::time::Instant; use smoltcp::{
use std::sync::Arc; phy::{DeviceCapabilities, Medium},
time::Instant,
};
use std::{
collections::VecDeque,
sync::{Arc, Mutex},
};
/// A virtual device that processes IP packets. IP packets received from the WireGuard endpoint /// A virtual device that processes IP packets through smoltcp and WireGuard.
/// are made available to this device using a channel receiver. IP packets sent from this device
/// are asynchronously sent out to the WireGuard tunnel.
pub struct VirtualIpDevice { pub struct VirtualIpDevice {
/// Tunnel to send IP packets to. /// Max transmission unit (bytes)
wg: Arc<WireGuardTunnel>, max_transmission_unit: usize,
/// Channel receiver for received IP packets. /// Channel receiver for received IP packets.
ip_dispatch_rx: tokio::sync::mpsc::Receiver<Vec<u8>>, bus_sender: BusSender,
/// Local queue for packets received from the bus that need to go through the smoltcp interface.
process_queue: Arc<Mutex<VecDeque<Bytes>>>,
} }
impl VirtualIpDevice { impl VirtualIpDevice {
/// Initializes a new virtual IP device. /// Initializes a new virtual IP device.
pub fn new( pub fn new(protocol: PortProtocol, bus: Bus, max_transmission_unit: usize) -> Self {
wg: Arc<WireGuardTunnel>, let mut bus_endpoint = bus.new_endpoint();
ip_dispatch_rx: tokio::sync::mpsc::Receiver<Vec<u8>>, let bus_sender = bus_endpoint.sender();
) -> Self { let process_queue = Arc::new(Mutex::new(VecDeque::new()));
Self { wg, ip_dispatch_rx }
}
/// Registers a virtual IP device for a single virtual client. {
pub fn new_direct(virtual_port: VirtualPort, wg: Arc<WireGuardTunnel>) -> anyhow::Result<Self> { let process_queue = process_queue.clone();
let (ip_dispatch_tx, ip_dispatch_rx) = tokio::sync::mpsc::channel(DISPATCH_CAPACITY); tokio::spawn(async move {
loop {
match bus_endpoint.recv().await {
Event::InboundInternetPacket(ip_proto, data) if ip_proto == protocol => {
let mut queue = process_queue
.lock()
.expect("Failed to acquire process queue lock");
queue.push_back(data);
bus_endpoint.send(Event::VirtualDeviceFed(ip_proto));
}
_ => {}
}
}
});
}
wg.register_virtual_interface(virtual_port, ip_dispatch_tx) Self {
.with_context(|| "Failed to register IP dispatch for virtual interface")?; bus_sender,
process_queue,
Ok(Self { wg, ip_dispatch_rx }) max_transmission_unit,
} }
pub async fn new_sink(wg: Arc<WireGuardTunnel>) -> anyhow::Result<Self> {
let ip_dispatch_rx = wg
.register_sink_interface()
.await
.with_context(|| "Failed to register IP dispatch for sink virtual interface")?;
Ok(Self { wg, ip_dispatch_rx })
} }
} }
impl<'a> Device<'a> for VirtualIpDevice { impl smoltcp::phy::Device for VirtualIpDevice {
type RxToken = RxToken; type RxToken<'a>
type TxToken = TxToken; = RxToken
where
Self: 'a;
type TxToken<'a>
= TxToken
where
Self: 'a;
fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> { fn receive(&mut self, _timestamp: Instant) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
match self.ip_dispatch_rx.try_recv() { let next = {
Ok(buffer) => Some(( let mut queue = self
Self::RxToken { buffer }, .process_queue
.lock()
.expect("Failed to acquire process queue lock");
queue.pop_front()
};
match next {
Some(buffer) => Some((
Self::RxToken {
buffer: {
let mut buf = BytesMut::new();
buf.put(buffer);
buf
},
},
Self::TxToken { Self::TxToken {
wg: self.wg.clone(), sender: self.bus_sender.clone(),
}, },
)), )),
Err(_) => None, None => None,
} }
} }
fn transmit(&'a mut self) -> Option<Self::TxToken> { fn transmit(&mut self, _timestamp: Instant) -> Option<Self::TxToken<'_>> {
Some(TxToken { Some(TxToken {
wg: self.wg.clone(), sender: self.bus_sender.clone(),
}) })
} }
fn capabilities(&self) -> DeviceCapabilities { fn capabilities(&self) -> DeviceCapabilities {
let mut cap = DeviceCapabilities::default(); let mut cap = DeviceCapabilities::default();
cap.medium = Medium::Ip; cap.medium = Medium::Ip;
cap.max_transmission_unit = self.wg.max_transmission_unit; cap.max_transmission_unit = self.max_transmission_unit;
cap cap
} }
} }
#[doc(hidden)] #[doc(hidden)]
pub struct RxToken { pub struct RxToken {
buffer: Vec<u8>, buffer: BytesMut,
} }
impl smoltcp::phy::RxToken for RxToken { impl smoltcp::phy::RxToken for RxToken {
fn consume<R, F>(mut self, _timestamp: Instant, f: F) -> smoltcp::Result<R> fn consume<R, F>(self, f: F) -> R
where where
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>, F: FnOnce(&[u8]) -> R,
{ {
f(&mut self.buffer) f(&self.buffer)
} }
} }
#[doc(hidden)] #[doc(hidden)]
pub struct TxToken { pub struct TxToken {
wg: Arc<WireGuardTunnel>, sender: BusSender,
} }
impl smoltcp::phy::TxToken for TxToken { impl smoltcp::phy::TxToken for TxToken {
fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> smoltcp::Result<R> fn consume<R, F>(self, len: usize, f: F) -> R
where where
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>, F: FnOnce(&mut [u8]) -> R,
{ {
let mut buffer = Vec::new(); let mut buffer = vec![0; len];
buffer.resize(len, 0);
let result = f(&mut buffer); let result = f(&mut buffer);
tokio::spawn(async move { self.sender
match self.wg.send_ip_packet(&buffer).await { .send(Event::OutboundInternetPacket(buffer.into()));
Ok(_) => {}
Err(e) => {
error!("Failed to send IP packet to WireGuard endpoint: {:?}", e);
}
}
});
result result
} }
} }

View file

@ -2,6 +2,7 @@ pub mod tcp;
pub mod udp; pub mod udp;
use crate::config::PortProtocol; use crate::config::PortProtocol;
use crate::VirtualIpDevice;
use async_trait::async_trait; use async_trait::async_trait;
use std::fmt::{Display, Formatter}; use std::fmt::{Display, Formatter};
@ -9,15 +10,56 @@ use std::fmt::{Display, Formatter};
pub trait VirtualInterfacePoll { pub trait VirtualInterfacePoll {
/// Initializes the virtual interface and processes incoming data to be dispatched /// Initializes the virtual interface and processes incoming data to be dispatched
/// to the WireGuard tunnel and to the real client. /// to the WireGuard tunnel and to the real client.
async fn poll_loop(mut self) -> anyhow::Result<()>; async fn poll_loop(mut self, device: VirtualIpDevice) -> anyhow::Result<()>;
} }
/// Virtual port. /// Virtual port.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)] #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct VirtualPort(pub u16, pub PortProtocol); pub struct VirtualPort(u16, PortProtocol);
impl VirtualPort {
/// Create a new `VirtualPort` instance, with the given port number and associated protocol.
pub fn new(port: u16, proto: PortProtocol) -> Self {
VirtualPort(port, proto)
}
/// The port number
pub fn num(&self) -> u16 {
self.0
}
/// The protocol of this port.
pub fn proto(&self) -> PortProtocol {
self.1
}
}
impl From<VirtualPort> for u16 {
fn from(port: VirtualPort) -> Self {
port.num()
}
}
impl From<&VirtualPort> for u16 {
fn from(port: &VirtualPort) -> Self {
port.num()
}
}
impl From<VirtualPort> for PortProtocol {
fn from(port: VirtualPort) -> Self {
port.proto()
}
}
impl From<&VirtualPort> for PortProtocol {
fn from(port: &VirtualPort) -> Self {
port.proto()
}
}
impl Display for VirtualPort { impl Display for VirtualPort {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "[{}:{}]", self.0, self.1) write!(f, "[{}:{}]", self.num(), self.proto())
} }
} }

View file

@ -1,284 +1,257 @@
use crate::config::{PortForwardConfig, PortProtocol}; use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::Event;
use crate::virtual_device::VirtualIpDevice; use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort}; use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::wg::WireGuardTunnel; use crate::Bus;
use anyhow::Context; use anyhow::Context;
use async_trait::async_trait; use async_trait::async_trait;
use smoltcp::iface::InterfaceBuilder; use bytes::Bytes;
use smoltcp::socket::{TcpSocket, TcpSocketBuffer, TcpState}; use smoltcp::iface::PollResult;
use smoltcp::wire::{IpAddress, IpCidr}; use smoltcp::{
use std::sync::atomic::{AtomicBool, Ordering}; iface::{Config, Interface, SocketHandle, SocketSet},
use std::sync::Arc; socket::tcp,
use std::time::Duration; time::Instant,
wire::{HardwareAddress, IpAddress, IpCidr, IpVersion},
};
use std::{
collections::{HashMap, HashSet, VecDeque},
net::IpAddr,
time::Duration,
};
const MAX_PACKET: usize = 65536; const MAX_PACKET: usize = 65536;
/// A virtual interface for proxying Layer 7 data to Layer 3 packets, and vice-versa. /// A virtual interface for proxying Layer 7 data to Layer 3 packets, and vice-versa.
pub struct TcpVirtualInterface { pub struct TcpVirtualInterface {
/// The virtual port assigned to the virtual client, used to source_peer_ip: IpAddr,
/// route Layer 4 segments/datagrams to and from the WireGuard tunnel. port_forwards: Vec<PortForwardConfig>,
virtual_port: u16, bus: Bus,
/// The overall port-forward configuration: used for the destination address (on which sockets: SocketSet<'static>,
/// the virtual server listens) and the protocol in use.
port_forward: PortForwardConfig,
/// The WireGuard tunnel to send IP packets to.
wg: Arc<WireGuardTunnel>,
/// Abort signal to shutdown the virtual interface and its parent task.
abort: Arc<AtomicBool>,
/// Channel sender for pushing Layer 7 data back to the real client.
data_to_real_client_tx: tokio::sync::mpsc::Sender<Vec<u8>>,
/// Channel receiver for processing Layer 7 data through the virtual interface.
data_to_virtual_server_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
/// One-shot sender to notify the parent task that the virtual client is ready to send Layer 7 data.
virtual_client_ready_tx: tokio::sync::oneshot::Sender<()>,
} }
impl TcpVirtualInterface { impl TcpVirtualInterface {
/// Initialize the parameters for a new virtual interface. /// Initialize the parameters for a new virtual interface.
/// Use the `poll_loop()` future to start the virtual interface poll loop. /// Use the `poll_loop()` future to start the virtual interface poll loop.
pub fn new( pub fn new(port_forwards: Vec<PortForwardConfig>, bus: Bus, source_peer_ip: IpAddr) -> Self {
virtual_port: u16,
port_forward: PortForwardConfig,
wg: Arc<WireGuardTunnel>,
abort: Arc<AtomicBool>,
data_to_real_client_tx: tokio::sync::mpsc::Sender<Vec<u8>>,
data_to_virtual_server_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
virtual_client_ready_tx: tokio::sync::oneshot::Sender<()>,
) -> Self {
Self { Self {
virtual_port, port_forwards: port_forwards
port_forward, .into_iter()
wg, .filter(|f| matches!(f.protocol, PortProtocol::Tcp))
abort, .collect(),
data_to_real_client_tx, source_peer_ip,
data_to_virtual_server_rx, bus,
virtual_client_ready_tx, sockets: SocketSet::new([]),
} }
} }
fn new_server_socket(port_forward: PortForwardConfig) -> anyhow::Result<tcp::Socket<'static>> {
static mut TCP_SERVER_RX_DATA: [u8; 0] = [];
static mut TCP_SERVER_TX_DATA: [u8; 0] = [];
let tcp_rx_buffer = tcp::SocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
let tcp_tx_buffer = tcp::SocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
let mut socket = tcp::Socket::new(tcp_rx_buffer, tcp_tx_buffer);
socket
.listen((
IpAddress::from(port_forward.destination.ip()),
port_forward.destination.port(),
))
.context("Virtual server socket failed to listen")?;
Ok(socket)
}
fn new_client_socket() -> anyhow::Result<tcp::Socket<'static>> {
let rx_data = vec![0u8; MAX_PACKET];
let tx_data = vec![0u8; MAX_PACKET];
let tcp_rx_buffer = tcp::SocketBuffer::new(rx_data);
let tcp_tx_buffer = tcp::SocketBuffer::new(tx_data);
let socket = tcp::Socket::new(tcp_rx_buffer, tcp_tx_buffer);
Ok(socket)
}
fn addresses(&self) -> Vec<IpCidr> {
let mut addresses = HashSet::new();
addresses.insert(IpAddress::from(self.source_peer_ip));
for config in self.port_forwards.iter() {
addresses.insert(IpAddress::from(config.destination.ip()));
}
addresses
.into_iter()
.map(|addr| IpCidr::new(addr, addr_length(&addr)))
.collect()
}
} }
#[async_trait] #[async_trait]
impl VirtualInterfacePoll for TcpVirtualInterface { impl VirtualInterfacePoll for TcpVirtualInterface {
async fn poll_loop(self) -> anyhow::Result<()> { async fn poll_loop(mut self, mut device: VirtualIpDevice) -> anyhow::Result<()> {
let mut virtual_client_ready_tx = Some(self.virtual_client_ready_tx); // Create CIDR block for source peer IP + each port forward IP
let mut data_to_virtual_server_rx = self.data_to_virtual_server_rx; let addresses = self.addresses();
let source_peer_ip = self.wg.source_peer_ip; let config = Config::new(HardwareAddress::Ip);
// Create a device and interface to simulate IP packets // Create virtual interface (contains smoltcp state machine)
// In essence: let mut iface = Interface::new(config, &mut device, Instant::now());
// * TCP packets received from the 'real' client are 'sent' to the 'virtual server' via the 'virtual client' iface.update_ip_addrs(|ip_addrs| {
// * Those TCP packets generate IP packets, which are captured from the interface and sent to the WireGuardTunnel addresses.into_iter().for_each(|addr| {
// * IP packets received by the WireGuardTunnel (from the endpoint) are fed into this 'virtual interface' ip_addrs
// * The interface processes those IP packets and routes them to the 'virtual client' (the rest is discarded) .push(addr)
// * The TCP data read by the 'virtual client' is sent to the 'real' TCP client .expect("maximum number of IPs in TCP interface reached");
});
});
// Consumer for IP packets to send through the virtual interface // Create virtual server for each port forward
// Initialize the interface for port_forward in self.port_forwards.iter() {
let device = let server_socket = TcpVirtualInterface::new_server_socket(*port_forward)?;
VirtualIpDevice::new_direct(VirtualPort(self.virtual_port, PortProtocol::Tcp), self.wg) self.sockets.add(server_socket);
.with_context(|| "Failed to initialize TCP VirtualIpDevice")?; }
// there are always 2 sockets: 1 virtual client and 1 virtual server. // The next time to poll the interface. Can be None for instant poll.
let mut sockets: [_; 2] = Default::default(); let mut next_poll: Option<tokio::time::Instant> = None;
let mut virtual_interface = InterfaceBuilder::new(device, &mut sockets[..])
.ip_addrs([
// Interface handles IP packets for the sender and recipient
IpCidr::new(IpAddress::from(source_peer_ip), 32),
IpCidr::new(IpAddress::from(self.port_forward.destination.ip()), 32),
])
.finalize();
// Server socket: this is a placeholder for the interface to route new connections to. // Bus endpoint to read events
let server_socket: anyhow::Result<TcpSocket> = { let mut endpoint = self.bus.new_endpoint();
static mut TCP_SERVER_RX_DATA: [u8; 0] = [];
static mut TCP_SERVER_TX_DATA: [u8; 0] = [];
let tcp_rx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
let tcp_tx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
let mut socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
socket // Maps virtual port to its client socket handle
.listen(( let mut port_client_handle_map: HashMap<VirtualPort, SocketHandle> = HashMap::new();
IpAddress::from(self.port_forward.destination.ip()),
self.port_forward.destination.port(),
))
.with_context(|| "Virtual server socket failed to listen")?;
Ok(socket) // Data packets to send from a virtual client
}; let mut send_queue: HashMap<VirtualPort, VecDeque<Bytes>> = HashMap::new();
let client_socket: anyhow::Result<TcpSocket> = {
let rx_data = vec![0u8; MAX_PACKET];
let tx_data = vec![0u8; MAX_PACKET];
let tcp_rx_buffer = TcpSocketBuffer::new(rx_data);
let tcp_tx_buffer = TcpSocketBuffer::new(tx_data);
let socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
Ok(socket)
};
let _server_handle = virtual_interface.add_socket(server_socket?);
let client_handle = virtual_interface.add_socket(client_socket?);
// Any data that wasn't sent because it was over the sending buffer limit
let mut tx_extra = Vec::new();
// Counts the connection attempts by the virtual client
let mut connection_attempts = 0;
// Whether the client has successfully connected before. Prevents the case of connecting again.
let mut has_connected = false;
loop { loop {
let loop_start = smoltcp::time::Instant::now(); tokio::select! {
_ = match (next_poll, port_client_handle_map.len()) {
(None, 0) => tokio::time::sleep(Duration::MAX),
(None, _) => tokio::time::sleep(Duration::ZERO),
(Some(until), _) => tokio::time::sleep_until(until),
} => {
let loop_start = smoltcp::time::Instant::now();
// Shutdown occurs when the real client closes the connection, // Find closed sockets
// or if the client was in a CLOSE-WAIT state (after a server FIN) and had no data to send anymore. port_client_handle_map.retain(|virtual_port, client_handle| {
// One last poll-loop iteration is executed so that the RST segment can be dispatched. let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
let shutdown = self.abort.load(Ordering::Relaxed); if client_socket.state() == tcp::State::Closed {
endpoint.send(Event::ClientConnectionDropped(*virtual_port));
if shutdown { send_queue.remove(virtual_port);
// Shutdown: sends a RST packet. self.sockets.remove(*client_handle);
trace!("[{}] Shutting down virtual interface", self.virtual_port); false
let client_socket = virtual_interface.get_socket::<TcpSocket>(client_handle); } else {
client_socket.abort(); // Not closed, retain
} true
match virtual_interface.poll(loop_start) {
Ok(processed) if processed => {
trace!(
"[{}] Virtual interface polled some packets to be processed",
self.virtual_port
);
}
Err(e) => {
error!(
"[{}] Virtual interface poll error: {:?}",
self.virtual_port, e
);
}
_ => {}
}
{
let (client_socket, context) =
virtual_interface.get_socket_and_context::<TcpSocket>(client_handle);
if !shutdown && client_socket.state() == TcpState::Closed && !has_connected {
// Not shutting down, but the client socket is closed, and the client never successfully connected.
if connection_attempts < 10 {
// Try to connect
client_socket
.connect(
context,
(
IpAddress::from(self.port_forward.destination.ip()),
self.port_forward.destination.port(),
),
(IpAddress::from(source_peer_ip), self.virtual_port),
)
.with_context(|| "Virtual server socket failed to listen")?;
if connection_attempts > 0 {
debug!(
"[{}] Virtual client retrying connection in 500ms",
self.virtual_port
);
// Not our first connection attempt, wait a little bit.
tokio::time::sleep(Duration::from_millis(500)).await;
} }
} else { });
// Too many connection attempts
self.abort.store(true, Ordering::Relaxed); if iface.poll(loop_start, &mut device, &mut self.sockets) == PollResult::SocketStateChanged {
log::trace!("TCP virtual interface polled some packets to be processed");
} }
connection_attempts += 1;
continue;
}
if client_socket.state() == TcpState::Established { for (virtual_port, client_handle) in port_client_handle_map.iter() {
// Prevent reconnection if the server later closes. let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
has_connected = true; if client_socket.can_send() {
} if let Some(send_queue) = send_queue.get_mut(virtual_port) {
let to_transfer = send_queue.pop_front();
if client_socket.can_recv() { if let Some(to_transfer_slice) = to_transfer.as_deref() {
match client_socket.recv(|buffer| (buffer.len(), buffer.to_vec())) { let total = to_transfer_slice.len();
Ok(data) => { match client_socket.send_slice(to_transfer_slice) {
trace!( Ok(sent) => {
"[{}] Virtual client received {} bytes of data", if sent < total {
self.virtual_port, // Sometimes only a subset is sent, so the rest needs to be sent on the next poll
data.len() let tx_extra = Vec::from(&to_transfer_slice[sent..total]);
); send_queue.push_front(tx_extra.into());
// Send it to the real client }
if let Err(e) = self.data_to_real_client_tx.send(data).await { }
error!("[{}] Failed to dispatch data from virtual client to real client: {:?}", self.virtual_port, e); Err(e) => {
error!(
"Failed to send slice via virtual client socket: {:?}", e
);
}
}
} else if client_socket.state() == tcp::State::CloseWait {
client_socket.close();
}
} }
} }
Err(e) => { if client_socket.can_recv() {
error!( match client_socket.recv(|buffer| (buffer.len(), Bytes::from(buffer.to_vec()))) {
"[{}] Failed to read from virtual client socket: {:?}", Ok(data) => {
self.virtual_port, e debug!("[{}] Received {} bytes from virtual server", virtual_port, data.len());
); if !data.is_empty() {
} endpoint.send(Event::RemoteData(*virtual_port, data));
} }
} }
if client_socket.can_send() { Err(e) => {
if let Some(virtual_client_ready_tx) = virtual_client_ready_tx.take() { error!(
virtual_client_ready_tx "Failed to read from virtual client socket: {:?}", e
.send(()) );
.expect("Failed to notify real client that virtual client is ready"); }
}
let mut to_transfer = None;
if tx_extra.is_empty() {
// The payload segment from the previous loop is complete,
// we can now read the next payload in the queue.
if let Ok(data) = data_to_virtual_server_rx.try_recv() {
to_transfer = Some(data);
} else if client_socket.state() == TcpState::CloseWait {
// No data to be sent in this loop. If the client state is CLOSE-WAIT (because of a server FIN),
// the interface is shutdown.
trace!("[{}] Shutting down virtual interface because client sent no more data, and server sent FIN (CLOSE-WAIT)", self.virtual_port);
self.abort.store(true, Ordering::Relaxed);
continue;
}
}
let to_transfer_slice = to_transfer.as_ref().unwrap_or(&tx_extra).as_slice();
if !to_transfer_slice.is_empty() {
let total = to_transfer_slice.len();
match client_socket.send_slice(to_transfer_slice) {
Ok(sent) => {
trace!(
"[{}] Sent {}/{} bytes via virtual client socket",
self.virtual_port,
sent,
total,
);
tx_extra = Vec::from(&to_transfer_slice[sent..total]);
}
Err(e) => {
error!(
"[{}] Failed to send slice via virtual client socket: {:?}",
self.virtual_port, e
);
} }
} }
} }
}
}
if shutdown { // The virtual interface determines the next time to poll (this is to reduce unnecessary polls)
break; next_poll = match iface.poll_delay(loop_start, &self.sockets) {
} Some(smoltcp::time::Duration::ZERO) => None,
Some(delay) => {
match virtual_interface.poll_delay(loop_start) { trace!("TCP Virtual interface delayed next poll by {}", delay);
Some(smoltcp::time::Duration::ZERO) => { Some(tokio::time::Instant::now() + Duration::from_millis(delay.total_millis()))
continue; },
None => None,
};
} }
_ => { event = endpoint.recv() => {
tokio::time::sleep(Duration::from_millis(1)).await; match event {
Event::ClientConnectionInitiated(port_forward, virtual_port) => {
let client_socket = TcpVirtualInterface::new_client_socket()?;
let client_handle = self.sockets.add(client_socket);
// Add handle to map
port_client_handle_map.insert(virtual_port, client_handle);
send_queue.insert(virtual_port, VecDeque::new());
let client_socket = self.sockets.get_mut::<tcp::Socket>(client_handle);
let context = iface.context();
client_socket
.connect(
context,
(
IpAddress::from(port_forward.destination.ip()),
port_forward.destination.port(),
),
(IpAddress::from(self.source_peer_ip), virtual_port.num()),
)
.context("Virtual server socket failed to listen")?;
next_poll = None;
}
Event::ClientConnectionDropped(virtual_port) => {
if let Some(client_handle) = port_client_handle_map.get(&virtual_port) {
let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
client_socket.close();
next_poll = None;
}
}
Event::LocalData(_, virtual_port, data) if send_queue.contains_key(&virtual_port) => {
if let Some(send_queue) = send_queue.get_mut(&virtual_port) {
send_queue.push_back(data);
next_poll = None;
}
}
Event::VirtualDeviceFed(PortProtocol::Tcp) => {
next_poll = None;
}
_ => {}
}
} }
} }
} }
trace!("[{}] Virtual interface task terminated", self.virtual_port); }
self.abort.store(true, Ordering::Relaxed); }
Ok(())
const fn addr_length(addr: &IpAddress) -> u8 {
match addr.version() {
IpVersion::Ipv4 => 32,
IpVersion::Ipv6 => 128,
} }
} }

View file

@ -1,200 +1,227 @@
use anyhow::Context;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use smoltcp::iface::{InterfaceBuilder, SocketHandle};
use smoltcp::socket::{UdpPacketMetadata, UdpSocket, UdpSocketBuffer};
use smoltcp::wire::{IpAddress, IpCidr};
use crate::config::PortForwardConfig; use crate::config::PortForwardConfig;
use crate::events::Event;
use crate::virtual_device::VirtualIpDevice; use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort}; use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::wg::{WireGuardTunnel, DISPATCH_CAPACITY}; use crate::{Bus, PortProtocol};
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use smoltcp::iface::PollResult;
use smoltcp::{
iface::{Config, Interface, SocketHandle, SocketSet},
socket::udp::{self, UdpMetadata},
time::Instant,
wire::{HardwareAddress, IpAddress, IpCidr, IpVersion},
};
use std::{
collections::{HashMap, HashSet, VecDeque},
net::IpAddr,
time::Duration,
};
const MAX_PACKET: usize = 65536; const MAX_PACKET: usize = 65536;
pub struct UdpVirtualInterface { pub struct UdpVirtualInterface {
port_forward: PortForwardConfig, source_peer_ip: IpAddr,
wg: Arc<WireGuardTunnel>, port_forwards: Vec<PortForwardConfig>,
data_to_real_client_tx: tokio::sync::mpsc::Sender<(VirtualPort, Vec<u8>)>, bus: Bus,
data_to_virtual_server_rx: tokio::sync::mpsc::Receiver<(VirtualPort, Vec<u8>)>, sockets: SocketSet<'static>,
} }
impl UdpVirtualInterface { impl UdpVirtualInterface {
pub fn new( /// Initialize the parameters for a new virtual interface.
port_forward: PortForwardConfig, /// Use the `poll_loop()` future to start the virtual interface poll loop.
wg: Arc<WireGuardTunnel>, pub fn new(port_forwards: Vec<PortForwardConfig>, bus: Bus, source_peer_ip: IpAddr) -> Self {
data_to_real_client_tx: tokio::sync::mpsc::Sender<(VirtualPort, Vec<u8>)>,
data_to_virtual_server_rx: tokio::sync::mpsc::Receiver<(VirtualPort, Vec<u8>)>,
) -> Self {
Self { Self {
port_forward, port_forwards: port_forwards
wg, .into_iter()
data_to_real_client_tx, .filter(|f| matches!(f.protocol, PortProtocol::Udp))
data_to_virtual_server_rx, .collect(),
source_peer_ip,
bus,
sockets: SocketSet::new([]),
} }
} }
fn new_server_socket(port_forward: PortForwardConfig) -> anyhow::Result<udp::Socket<'static>> {
static mut UDP_SERVER_RX_META: [udp::PacketMetadata; 0] = [];
static mut UDP_SERVER_RX_DATA: [u8; 0] = [];
static mut UDP_SERVER_TX_META: [udp::PacketMetadata; 0] = [];
static mut UDP_SERVER_TX_DATA: [u8; 0] = [];
let udp_rx_buffer =
udp::PacketBuffer::new(unsafe { &mut UDP_SERVER_RX_META[..] }, unsafe {
&mut UDP_SERVER_RX_DATA[..]
});
let udp_tx_buffer =
udp::PacketBuffer::new(unsafe { &mut UDP_SERVER_TX_META[..] }, unsafe {
&mut UDP_SERVER_TX_DATA[..]
});
let mut socket = udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
socket
.bind((
IpAddress::from(port_forward.destination.ip()),
port_forward.destination.port(),
))
.context("UDP virtual server socket failed to bind")?;
Ok(socket)
}
fn new_client_socket(
source_peer_ip: IpAddr,
client_port: VirtualPort,
) -> anyhow::Result<udp::Socket<'static>> {
let rx_meta = vec![udp::PacketMetadata::EMPTY; 10];
let tx_meta = vec![udp::PacketMetadata::EMPTY; 10];
let rx_data = vec![0u8; MAX_PACKET];
let tx_data = vec![0u8; MAX_PACKET];
let udp_rx_buffer = udp::PacketBuffer::new(rx_meta, rx_data);
let udp_tx_buffer = udp::PacketBuffer::new(tx_meta, tx_data);
let mut socket = udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
socket
.bind((IpAddress::from(source_peer_ip), client_port.num()))
.context("UDP virtual client failed to bind")?;
Ok(socket)
}
fn addresses(&self) -> Vec<IpCidr> {
let mut addresses = HashSet::new();
addresses.insert(IpAddress::from(self.source_peer_ip));
for config in self.port_forwards.iter() {
addresses.insert(IpAddress::from(config.destination.ip()));
}
addresses
.into_iter()
.map(|addr| IpCidr::new(addr, addr_length(&addr)))
.collect()
}
} }
#[async_trait] #[async_trait]
impl VirtualInterfacePoll for UdpVirtualInterface { impl VirtualInterfacePoll for UdpVirtualInterface {
async fn poll_loop(self) -> anyhow::Result<()> { async fn poll_loop(mut self, mut device: VirtualIpDevice) -> anyhow::Result<()> {
// Data receiver to dispatch using virtual client sockets // Create CIDR block for source peer IP + each port forward IP
let mut data_to_virtual_server_rx = self.data_to_virtual_server_rx; let addresses = self.addresses();
let config = Config::new(HardwareAddress::Ip);
// The IP to bind client sockets to // Create virtual interface (contains smoltcp state machine)
let source_peer_ip = self.wg.source_peer_ip; let mut iface = Interface::new(config, &mut device, Instant::now());
iface.update_ip_addrs(|ip_addrs| {
addresses.into_iter().for_each(|addr| {
ip_addrs
.push(addr)
.expect("maximum number of IPs in UDP interface reached");
});
});
// The IP/port to bind the server socket to // Create virtual server for each port forward
let destination = self.port_forward.destination; for port_forward in self.port_forwards.iter() {
let server_socket = UdpVirtualInterface::new_server_socket(*port_forward)?;
self.sockets.add(server_socket);
}
// Initialize a channel for IP packets. // The next time to poll the interface. Can be None for instant poll.
// The "base transmitted" is cloned so that each virtual port can register a sender in the tunnel.
// The receiver is given to the device so that the Virtual Interface can process incoming IP packets from the tunnel.
let (base_ip_dispatch_tx, ip_dispatch_rx) = tokio::sync::mpsc::channel(DISPATCH_CAPACITY);
let device = VirtualIpDevice::new(self.wg.clone(), ip_dispatch_rx);
let mut virtual_interface = InterfaceBuilder::new(device, vec![])
.ip_addrs([
// Interface handles IP packets for the sender and recipient
IpCidr::new(source_peer_ip.into(), 32),
IpCidr::new(destination.ip().into(), 32),
])
.finalize();
// Server socket: this is a placeholder for the interface.
let server_socket: anyhow::Result<UdpSocket> = {
static mut UDP_SERVER_RX_META: [UdpPacketMetadata; 0] = [];
static mut UDP_SERVER_RX_DATA: [u8; 0] = [];
static mut UDP_SERVER_TX_META: [UdpPacketMetadata; 0] = [];
static mut UDP_SERVER_TX_DATA: [u8; 0] = [];
let udp_rx_buffer =
UdpSocketBuffer::new(unsafe { &mut UDP_SERVER_RX_META[..] }, unsafe {
&mut UDP_SERVER_RX_DATA[..]
});
let udp_tx_buffer =
UdpSocketBuffer::new(unsafe { &mut UDP_SERVER_TX_META[..] }, unsafe {
&mut UDP_SERVER_TX_DATA[..]
});
let mut socket = UdpSocket::new(udp_rx_buffer, udp_tx_buffer);
socket
.bind((IpAddress::from(destination.ip()), destination.port()))
.with_context(|| "UDP virtual server socket failed to listen")?;
Ok(socket)
};
let _server_handle = virtual_interface.add_socket(server_socket?);
// A map of virtual port to client socket.
let mut client_sockets: HashMap<VirtualPort, SocketHandle> = HashMap::new();
// The next instant required to poll the virtual interface
// None means "immediate poll required".
let mut next_poll: Option<tokio::time::Instant> = None; let mut next_poll: Option<tokio::time::Instant> = None;
// Bus endpoint to read events
let mut endpoint = self.bus.new_endpoint();
// Maps virtual port to its client socket handle
let mut port_client_handle_map: HashMap<VirtualPort, SocketHandle> = HashMap::new();
// Data packets to send from a virtual client
let mut send_queue: HashMap<VirtualPort, VecDeque<(PortForwardConfig, Bytes)>> =
HashMap::new();
loop { loop {
let wg = self.wg.clone();
tokio::select! { tokio::select! {
// Wait the recommended amount of time by smoltcp, and poll again. _ = match (next_poll, port_client_handle_map.len()) {
_ = match next_poll { (None, 0) => tokio::time::sleep(Duration::MAX),
None => tokio::time::sleep(Duration::ZERO), (None, _) => tokio::time::sleep(Duration::ZERO),
Some(until) => tokio::time::sleep_until(until) (Some(until), _) => tokio::time::sleep_until(until),
} => { } => {
let loop_start = smoltcp::time::Instant::now(); let loop_start = smoltcp::time::Instant::now();
match virtual_interface.poll(loop_start) { if iface.poll(loop_start, &mut device, &mut self.sockets) == PollResult::SocketStateChanged {
Ok(processed) if processed => { log::trace!("UDP virtual interface polled some packets to be processed");
trace!("UDP virtual interface polled some packets to be processed");
}
Err(e) => error!("UDP virtual interface poll error: {:?}", e),
_ => {}
} }
// Loop through each client socket and check if there is any data to send back for (virtual_port, client_handle) in port_client_handle_map.iter() {
// to the real client. let client_socket = self.sockets.get_mut::<udp::Socket>(*client_handle);
for (virtual_port, client_socket_handle) in client_sockets.iter() { if client_socket.can_send() {
let client_socket = virtual_interface.get_socket::<UdpSocket>(*client_socket_handle); if let Some(send_queue) = send_queue.get_mut(virtual_port) {
match client_socket.recv() { let to_transfer = send_queue.pop_front();
Ok((data, _peer)) => { if let Some((port_forward, data)) = to_transfer {
// Send the data back to the real client using MPSC channel client_socket
self.data_to_real_client_tx .send_slice(
.send((*virtual_port, data.to_vec())) &data,
.await UdpMetadata::from(port_forward.destination),
.unwrap_or_else(|e| { )
error!( .unwrap_or_else(|e| {
"[{}] Failed to dispatch data from virtual client to real client: {:?}", error!(
virtual_port, e "[{}] Failed to send data to virtual server: {:?}",
); virtual_port, e
}); );
} });
Err(smoltcp::Error::Exhausted) => {} }
Err(e) => {
error!(
"[{}] Failed to read from virtual client socket: {:?}",
virtual_port, e
);
} }
} }
} if client_socket.can_recv() {
match client_socket.recv() {
next_poll = match virtual_interface.poll_delay(loop_start) { Ok((data, _peer)) => {
Some(smoltcp::time::Duration::ZERO) => None, if !data.is_empty() {
Some(delay) => Some(tokio::time::Instant::now() + Duration::from_millis(delay.millis())), endpoint.send(Event::RemoteData(*virtual_port, data.to_vec().into()));
None => None, }
} }
} Err(e) => {
// Wait for data to be received from the real client
data_recv_result = data_to_virtual_server_rx.recv() => {
if let Some((client_port, data)) = data_recv_result {
// Register the socket in WireGuard Tunnel (overrides any previous registration as well)
wg.register_virtual_interface(client_port, base_ip_dispatch_tx.clone())
.unwrap_or_else(|e| {
error!(
"[{}] Failed to register UDP socket in WireGuard tunnel: {:?}",
client_port, e
);
});
let client_socket_handle = client_sockets.entry(client_port).or_insert_with(|| {
let rx_meta = vec![UdpPacketMetadata::EMPTY; 10];
let tx_meta = vec![UdpPacketMetadata::EMPTY; 10];
let rx_data = vec![0u8; MAX_PACKET];
let tx_data = vec![0u8; MAX_PACKET];
let udp_rx_buffer = UdpSocketBuffer::new(rx_meta, rx_data);
let udp_tx_buffer = UdpSocketBuffer::new(tx_meta, tx_data);
let mut socket = UdpSocket::new(udp_rx_buffer, udp_tx_buffer);
socket
.bind((IpAddress::from(wg.source_peer_ip), client_port.0))
.unwrap_or_else(|e| {
error!( error!(
"[{}] UDP virtual client socket failed to bind: {:?}", "Failed to read from virtual client socket: {:?}", e
client_port, e
); );
}); }
}
}
}
virtual_interface.add_socket(socket) // The virtual interface determines the next time to poll (this is to reduce unnecessary polls)
}); next_poll = match iface.poll_delay(loop_start, &self.sockets) {
Some(smoltcp::time::Duration::ZERO) => None,
Some(delay) => {
trace!("UDP Virtual interface delayed next poll by {}", delay);
Some(tokio::time::Instant::now() + Duration::from_millis(delay.total_millis()))
},
None => None,
};
}
event = endpoint.recv() => {
match event {
Event::LocalData(port_forward, virtual_port, data) => {
if let Some(send_queue) = send_queue.get_mut(&virtual_port) {
// Client socket already exists
send_queue.push_back((port_forward, data));
} else {
// Client socket does not exist
let client_socket = UdpVirtualInterface::new_client_socket(self.source_peer_ip, virtual_port)?;
let client_handle = self.sockets.add(client_socket);
let client_socket = virtual_interface.get_socket::<UdpSocket>(*client_socket_handle); // Add handle to map
client_socket port_client_handle_map.insert(virtual_port, client_handle);
.send_slice( send_queue.insert(virtual_port, VecDeque::from(vec![(port_forward, data)]));
&data, }
(IpAddress::from(destination.ip()), destination.port()).into(), next_poll = None;
) }
.unwrap_or_else(|e| { Event::VirtualDeviceFed(PortProtocol::Udp) => {
error!( next_poll = None;
"[{}] Failed to send data to virtual server: {:?}", }
client_port, e _ => {}
);
});
} }
} }
} }
} }
} }
} }
const fn addr_length(addr: &IpAddress) -> u8 {
match addr.version() {
IpVersion::Ipv4 => 32,
IpVersion::Ipv6 => 128,
}
}

317
src/wg.rs
View file

@ -1,15 +1,18 @@
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::time::Duration; use std::time::Duration;
use crate::Bus;
use anyhow::Context; use anyhow::Context;
use async_recursion::async_recursion;
use boringtun::noise::errors::WireGuardError;
use boringtun::noise::{Tunn, TunnResult}; use boringtun::noise::{Tunn, TunnResult};
use log::Level; use log::Level;
use smoltcp::wire::{IpProtocol, IpVersion, Ipv4Packet, Ipv6Packet, TcpPacket, UdpPacket}; use smoltcp::wire::{IpProtocol, IpVersion, Ipv4Packet, Ipv6Packet};
use tokio::net::UdpSocket; use tokio::net::UdpSocket;
use tokio::sync::RwLock; use tokio::sync::Mutex;
use crate::config::{Config, PortProtocol}; use crate::config::{Config, PortProtocol};
use crate::virtual_iface::VirtualPort; use crate::events::Event;
/// The capacity of the channel for received IP packets. /// The capacity of the channel for received IP packets.
pub const DISPATCH_CAPACITY: usize = 1_000; pub const DISPATCH_CAPACITY: usize = 1_000;
@ -21,41 +24,31 @@ const MAX_PACKET: usize = 65536;
pub struct WireGuardTunnel { pub struct WireGuardTunnel {
pub(crate) source_peer_ip: IpAddr, pub(crate) source_peer_ip: IpAddr,
/// `boringtun` peer/tunnel implementation, used for crypto & WG protocol. /// `boringtun` peer/tunnel implementation, used for crypto & WG protocol.
peer: Box<Tunn>, peer: Mutex<Box<Tunn>>,
/// The UDP socket for the public WireGuard endpoint to connect to. /// The UDP socket for the public WireGuard endpoint to connect to.
udp: UdpSocket, udp: UdpSocket,
/// The address of the public WireGuard endpoint (UDP). /// The address of the public WireGuard endpoint (UDP).
pub(crate) endpoint: SocketAddr, pub(crate) endpoint: SocketAddr,
/// Maps virtual ports to the corresponding IP packet dispatcher. /// Event bus
virtual_port_ip_tx: dashmap::DashMap<VirtualPort, tokio::sync::mpsc::Sender<Vec<u8>>>, bus: Bus,
/// IP packet dispatcher for unroutable packets. `None` if not initialized.
sink_ip_tx: RwLock<Option<tokio::sync::mpsc::Sender<Vec<u8>>>>,
/// The max transmission unit for WireGuard.
pub(crate) max_transmission_unit: usize,
} }
impl WireGuardTunnel { impl WireGuardTunnel {
/// Initialize a new WireGuard tunnel. /// Initialize a new WireGuard tunnel.
pub async fn new(config: &Config) -> anyhow::Result<Self> { pub async fn new(config: &Config, bus: Bus) -> anyhow::Result<Self> {
let source_peer_ip = config.source_peer_ip; let source_peer_ip = config.source_peer_ip;
let peer = Self::create_tunnel(config)?; let peer = Mutex::new(Box::new(Self::create_tunnel(config)?));
let endpoint = config.endpoint_addr; let endpoint = config.endpoint_addr;
let udp = UdpSocket::bind(match endpoint { let udp = UdpSocket::bind(config.endpoint_bind_addr)
SocketAddr::V4(_) => "0.0.0.0:0", .await
SocketAddr::V6(_) => "[::]:0", .context("Failed to create UDP socket for WireGuard connection")?;
})
.await
.with_context(|| "Failed to create UDP socket for WireGuard connection")?;
let virtual_port_ip_tx = Default::default();
Ok(Self { Ok(Self {
source_peer_ip, source_peer_ip,
peer, peer,
udp, udp,
endpoint, endpoint,
virtual_port_ip_tx, bus,
sink_ip_tx: RwLock::new(None),
max_transmission_unit: config.max_transmission_unit,
}) })
} }
@ -63,12 +56,16 @@ impl WireGuardTunnel {
pub async fn send_ip_packet(&self, packet: &[u8]) -> anyhow::Result<()> { pub async fn send_ip_packet(&self, packet: &[u8]) -> anyhow::Result<()> {
trace_ip_packet("Sending IP packet", packet); trace_ip_packet("Sending IP packet", packet);
let mut send_buf = [0u8; MAX_PACKET]; let mut send_buf = [0u8; MAX_PACKET];
match self.peer.encapsulate(packet, &mut send_buf) { let encapsulate_result = {
let mut peer = self.peer.lock().await;
peer.encapsulate(packet, &mut send_buf)
};
match encapsulate_result {
TunnResult::WriteToNetwork(packet) => { TunnResult::WriteToNetwork(packet) => {
self.udp self.udp
.send_to(packet, self.endpoint) .send_to(packet, self.endpoint)
.await .await
.with_context(|| "Failed to send encrypted IP packet to WireGuard endpoint.")?; .context("Failed to send encrypted IP packet to WireGuard endpoint.")?;
debug!( debug!(
"Sent {} bytes to WireGuard endpoint (encrypted IP packet)", "Sent {} bytes to WireGuard endpoint (encrypted IP packet)",
packet.len() packet.len()
@ -90,31 +87,20 @@ impl WireGuardTunnel {
Ok(()) Ok(())
} }
/// Register a virtual interface (using its assigned virtual port) with the given IP packet `Sender`. pub async fn produce_task(&self) -> ! {
pub fn register_virtual_interface( trace!("Starting WireGuard production task");
&self, let mut endpoint = self.bus.new_endpoint();
virtual_port: VirtualPort,
sender: tokio::sync::mpsc::Sender<Vec<u8>>,
) -> anyhow::Result<()> {
self.virtual_port_ip_tx.insert(virtual_port, sender);
Ok(())
}
/// Register a virtual interface (using its assigned virtual port) with the given IP packet `Sender`. loop {
pub async fn register_sink_interface( if let Event::OutboundInternetPacket(data) = endpoint.recv().await {
&self, match self.send_ip_packet(&data).await {
) -> anyhow::Result<tokio::sync::mpsc::Receiver<Vec<u8>>> { Ok(_) => {}
let (sender, receiver) = tokio::sync::mpsc::channel(DISPATCH_CAPACITY); Err(e) => {
error!("{:?}", e);
let mut sink_ip_tx = self.sink_ip_tx.write().await; }
*sink_ip_tx = Some(sender); }
}
Ok(receiver) }
}
/// Releases the virtual interface from IP dispatch.
pub fn release_virtual_interface(&self, virtual_port: VirtualPort) {
self.virtual_port_ip_tx.remove(&virtual_port);
} }
/// WireGuard Routine task. Handles Handshake, keep-alive, etc. /// WireGuard Routine task. Handles Handshake, keep-alive, etc.
@ -123,43 +109,62 @@ impl WireGuardTunnel {
loop { loop {
let mut send_buf = [0u8; MAX_PACKET]; let mut send_buf = [0u8; MAX_PACKET];
match self.peer.update_timers(&mut send_buf) { let tun_result = { self.peer.lock().await.update_timers(&mut send_buf) };
TunnResult::WriteToNetwork(packet) => { self.handle_routine_tun_result(tun_result).await;
debug!(
"Sending routine packet of {} bytes to WireGuard endpoint",
packet.len()
);
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
Err(e) => {
error!(
"Failed to send routine packet to WireGuard endpoint: {:?}",
e
);
}
};
}
TunnResult::Err(e) => {
error!(
"Failed to prepare routine packet for WireGuard endpoint: {:?}",
e
);
}
TunnResult::Done => {
// Sleep for a bit
tokio::time::sleep(Duration::from_millis(1)).await;
}
other => {
warn!("Unexpected WireGuard routine task state: {:?}", other);
}
}
} }
} }
#[async_recursion]
async fn handle_routine_tun_result<'a: 'async_recursion>(&self, result: TunnResult<'a>) -> () {
match result {
TunnResult::WriteToNetwork(packet) => {
debug!(
"Sending routine packet of {} bytes to WireGuard endpoint",
packet.len()
);
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
Err(e) => {
error!(
"Failed to send routine packet to WireGuard endpoint: {:?}",
e
);
}
};
}
TunnResult::Err(WireGuardError::ConnectionExpired) => {
warn!("Wireguard handshake has expired!");
let mut buf = vec![0u8; MAX_PACKET];
let result = self
.peer
.lock()
.await
.format_handshake_initiation(&mut buf[..], false);
self.handle_routine_tun_result(result).await
}
TunnResult::Err(e) => {
error!(
"Failed to prepare routine packet for WireGuard endpoint: {:?}",
e
);
}
TunnResult::Done => {
// Sleep for a bit
tokio::time::sleep(Duration::from_millis(1)).await;
}
other => {
warn!("Unexpected WireGuard routine task state: {:?}", other);
}
};
}
/// WireGuard consumption task. Receives encrypted packets from the WireGuard endpoint, /// WireGuard consumption task. Receives encrypted packets from the WireGuard endpoint,
/// decapsulates them, and dispatches newly received IP packets. /// decapsulates them, and dispatches newly received IP packets.
pub async fn consume_task(&self) -> ! { pub async fn consume_task(&self) -> ! {
trace!("Starting WireGuard consumption task"); trace!("Starting WireGuard consumption task");
let endpoint = self.bus.new_endpoint();
loop { loop {
let mut recv_buf = [0u8; MAX_PACKET]; let mut recv_buf = [0u8; MAX_PACKET];
@ -176,7 +181,11 @@ impl WireGuardTunnel {
}; };
let data = &recv_buf[..size]; let data = &recv_buf[..size];
match self.peer.decapsulate(None, data, &mut send_buf) { let decapsulate_result = {
let mut peer = self.peer.lock().await;
peer.decapsulate(None, data, &mut send_buf)
};
match decapsulate_result {
TunnResult::WriteToNetwork(packet) => { TunnResult::WriteToNetwork(packet) => {
match self.udp.send_to(packet, self.endpoint).await { match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {} Ok(_) => {}
@ -185,9 +194,10 @@ impl WireGuardTunnel {
continue; continue;
} }
}; };
let mut peer = self.peer.lock().await;
loop { loop {
let mut send_buf = [0u8; MAX_PACKET]; let mut send_buf = [0u8; MAX_PACKET];
match self.peer.decapsulate(None, &[], &mut send_buf) { match peer.decapsulate(None, &[], &mut send_buf) {
TunnResult::WriteToNetwork(packet) => { TunnResult::WriteToNetwork(packet) => {
match self.udp.send_to(packet, self.endpoint).await { match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {} Ok(_) => {}
@ -212,38 +222,8 @@ impl WireGuardTunnel {
// For debugging purposes: parse packet // For debugging purposes: parse packet
trace_ip_packet("Received IP packet", packet); trace_ip_packet("Received IP packet", packet);
match self.route_ip_packet(packet) { if let Some(proto) = self.route_protocol(packet) {
RouteResult::Dispatch(port) => { endpoint.send(Event::InboundInternetPacket(proto, packet.to_vec().into()));
let sender = self.virtual_port_ip_tx.get(&port);
if let Some(sender_guard) = sender {
let sender = sender_guard.value();
match sender.send(packet.to_vec()).await {
Ok(_) => {
trace!(
"Dispatched received IP packet to virtual port {}",
port
);
}
Err(e) => {
error!(
"Failed to dispatch received IP packet to virtual port {}: {}",
port, e
);
}
}
} else {
warn!("[{}] Race condition: failed to get virtual port sender after it was dispatched", port);
}
}
RouteResult::Sink => {
trace!("Sending unroutable IP packet received from WireGuard endpoint to sink interface");
self.route_ip_sink(packet).await.unwrap_or_else(|e| {
error!("Failed to send unroutable IP packet to sink: {:?}", e)
});
}
RouteResult::Drop => {
trace!("Dropped unroutable IP packet received from WireGuard endpoint");
}
} }
} }
_ => {} _ => {}
@ -251,102 +231,46 @@ impl WireGuardTunnel {
} }
} }
fn create_tunnel(config: &Config) -> anyhow::Result<Box<Tunn>> { fn create_tunnel(config: &Config) -> anyhow::Result<Tunn> {
let private = config.private_key.as_ref().clone();
let public = *config.endpoint_public_key.as_ref();
Tunn::new( Tunn::new(
config.private_key.clone(), private,
config.endpoint_public_key.clone(), public,
None, config.preshared_key,
config.keepalive_seconds, config.keepalive_seconds,
0, 0,
None, None,
) )
.map_err(|s| anyhow::anyhow!("{}", s)) .map_err(|s| anyhow::anyhow!("{}", s))
.with_context(|| "Failed to initialize boringtun Tunn") .context("Failed to initialize boringtun Tunn")
} }
/// Makes a decision on the handling of an incoming IP packet. /// Determine the inner protocol of the incoming IP packet (TCP/UDP).
fn route_ip_packet(&self, packet: &[u8]) -> RouteResult { fn route_protocol(&self, packet: &[u8]) -> Option<PortProtocol> {
match IpVersion::of_packet(packet) { match IpVersion::of_packet(packet) {
Ok(IpVersion::Ipv4) => Ipv4Packet::new_checked(&packet) Ok(IpVersion::Ipv4) => Ipv4Packet::new_checked(&packet)
.ok() .ok()
// Only care if the packet is destined for this tunnel // Only care if the packet is destined for this tunnel
.filter(|packet| Ipv4Addr::from(packet.dst_addr()) == self.source_peer_ip) .filter(|packet| packet.dst_addr() == self.source_peer_ip)
.map(|packet| match packet.protocol() { .and_then(|packet| match packet.next_header() {
IpProtocol::Tcp => Some(self.route_tcp_segment(packet.payload())), IpProtocol::Tcp => Some(PortProtocol::Tcp),
IpProtocol::Udp => Some(self.route_udp_datagram(packet.payload())), IpProtocol::Udp => Some(PortProtocol::Udp),
// Unrecognized protocol, so we cannot determine where to route // Unrecognized protocol, so we cannot determine where to route
_ => Some(RouteResult::Drop), _ => None,
}) }),
.flatten()
.unwrap_or(RouteResult::Drop),
Ok(IpVersion::Ipv6) => Ipv6Packet::new_checked(&packet) Ok(IpVersion::Ipv6) => Ipv6Packet::new_checked(&packet)
.ok() .ok()
// Only care if the packet is destined for this tunnel // Only care if the packet is destined for this tunnel
.filter(|packet| Ipv6Addr::from(packet.dst_addr()) == self.source_peer_ip) .filter(|packet| packet.dst_addr() == self.source_peer_ip)
.map(|packet| match packet.next_header() { .and_then(|packet| match packet.next_header() {
IpProtocol::Tcp => Some(self.route_tcp_segment(packet.payload())), IpProtocol::Tcp => Some(PortProtocol::Tcp),
IpProtocol::Udp => Some(self.route_udp_datagram(packet.payload())), IpProtocol::Udp => Some(PortProtocol::Udp),
// Unrecognized protocol, so we cannot determine where to route // Unrecognized protocol, so we cannot determine where to route
_ => Some(RouteResult::Drop), _ => None,
}) }),
.flatten() _ => None,
.unwrap_or(RouteResult::Drop),
_ => RouteResult::Drop,
}
}
/// Makes a decision on the handling of an incoming TCP segment.
fn route_tcp_segment(&self, segment: &[u8]) -> RouteResult {
TcpPacket::new_checked(segment)
.ok()
.map(|tcp| {
if self
.virtual_port_ip_tx
.get(&VirtualPort(tcp.dst_port(), PortProtocol::Tcp))
.is_some()
{
RouteResult::Dispatch(VirtualPort(tcp.dst_port(), PortProtocol::Tcp))
} else if tcp.rst() {
RouteResult::Drop
} else {
RouteResult::Sink
}
})
.unwrap_or(RouteResult::Drop)
}
/// Makes a decision on the handling of an incoming UDP datagram.
fn route_udp_datagram(&self, datagram: &[u8]) -> RouteResult {
UdpPacket::new_checked(datagram)
.ok()
.map(|udp| {
if self
.virtual_port_ip_tx
.get(&VirtualPort(udp.dst_port(), PortProtocol::Udp))
.is_some()
{
RouteResult::Dispatch(VirtualPort(udp.dst_port(), PortProtocol::Udp))
} else {
RouteResult::Drop
}
})
.unwrap_or(RouteResult::Drop)
}
/// Route a packet to the IP sink interface.
async fn route_ip_sink(&self, packet: &[u8]) -> anyhow::Result<()> {
let ip_sink_tx = self.sink_ip_tx.read().await;
if let Some(ip_sink_tx) = &*ip_sink_tx {
ip_sink_tx
.send(packet.to_vec())
.await
.with_context(|| "Failed to dispatch IP packet to sink interface")
} else {
warn!(
"Could not dispatch unroutable IP packet to sink because interface is not active."
);
Ok(())
} }
} }
} }
@ -370,12 +294,3 @@ fn trace_ip_packet(message: &str, packet: &[u8]) {
} }
} }
} }
enum RouteResult {
/// Dispatch the packet to the virtual port.
Dispatch(VirtualPort),
/// The packet is not routable, and should be sent to the sink interface.
Sink,
/// The packet is not routable, and can be safely ignored.
Drop,
}