Compare commits

182 commits

Author SHA1 Message Date
3ca5ae2181
Merge pull request #85 from aramperes/dependabot/cargo/async-trait-0.1.87
build(deps): bump async-trait from 0.1.83 to 0.1.87
2025-03-10 23:32:19 -04:00
ac83ddbd4d
Merge pull request #86 from aramperes/dependabot/cargo/anyhow-1.0.97
build(deps): bump anyhow from 1.0.94 to 1.0.97
2025-03-10 23:32:12 -04:00
17f424140d
Merge pull request #87 from aramperes/dependabot/cargo/tokio-1.44.0
build(deps): bump tokio from 1.42.0 to 1.44.0
2025-03-10 23:31:55 -04:00
dependabot[bot]
8030ca1a2d
build(deps): bump tokio from 1.42.0 to 1.44.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.42.0 to 1.44.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.42.0...tokio-1.44.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 16:58:22 +00:00
dependabot[bot]
7eddf3f17f
build(deps): bump anyhow from 1.0.94 to 1.0.97
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.94 to 1.0.97.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.94...1.0.97)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 16:47:17 +00:00
dependabot[bot]
bcfa43702a
build(deps): bump async-trait from 0.1.83 to 0.1.87
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.83 to 0.1.87.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.83...0.1.87)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 16:47:00 +00:00
d0fcab38c3 docs: update README and LICENSE 2025-01-25 21:45:38 -05:00
c83c9ec500
Merge pull request #67 from aramperes/dependabot/cargo/priority-queue-2.1.1 2024-12-11 19:56:22 -05:00
caadd415cd
Merge pull request #68 from aramperes/dependabot/cargo/pretty_env_logger-0.5.0 2024-12-11 19:55:18 -05:00
3a89f2877d
Merge pull request #69 from aramperes/dependabot/cargo/anyhow-1.0.94 2024-12-11 19:54:56 -05:00
341849762c
Merge pull request #70 from aramperes/dependabot/cargo/tokio-1.42.0 2024-12-11 19:54:32 -05:00
dependabot[bot]
57e6ddc74c
Bump tokio from 1.41.1 to 1.42.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.41.1 to 1.42.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.41.1...tokio-1.42.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-09 17:28:44 +00:00
dependabot[bot]
08d99b9d22
Bump anyhow from 1.0.93 to 1.0.94
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.93 to 1.0.94.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.93...1.0.94)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-09 17:28:37 +00:00
dependabot[bot]
2661a2d29f
Bump pretty_env_logger from 0.4.0 to 0.5.0
Bumps [pretty_env_logger](https://github.com/seanmonstar/pretty-env-logger) from 0.4.0 to 0.5.0.
- [Commits](https://github.com/seanmonstar/pretty-env-logger/compare/v0.4.0...v0.5.0)

---
updated-dependencies:
- dependency-name: pretty_env_logger
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 03:38:35 +00:00
dependabot[bot]
6722237902
Bump priority-queue from 1.4.0 to 2.1.1
Bumps [priority-queue](https://github.com/garro95/priority-queue) from 1.4.0 to 2.1.1.
- [Release notes](https://github.com/garro95/priority-queue/releases)
- [Commits](https://github.com/garro95/priority-queue/commits/2.1.1)

---
updated-dependencies:
- dependency-name: priority-queue
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 03:38:32 +00:00
83ef02c695
Create dependabot.yml 2024-12-01 22:37:33 -05:00
89c3b59610 fix: typo 2024-12-01 16:03:32 -05:00
c6544cfe05 fix: assume path is target 2024-12-01 16:01:27 -05:00
f75909fd8f fix: expected output location in release 2024-12-01 15:56:37 -05:00
52d1d589ac use cross to cross-build 2024-12-01 15:45:25 -05:00
eb9c0be437 force cargo build with target 2024-12-01 15:39:16 -05:00
d307a11819 release: v0.3.10 2024-12-01 15:33:13 -05:00
c4c52babae
Merge pull request #65 from aramperes/smoltcp-0.12 2024-12-01 15:31:03 -05:00
6b2f6148c6 chore: add linux-aarch64 build 2024-12-01 15:29:59 -05:00
991eef0311 chore: update MSRV to 1.80.0 2024-12-01 15:27:12 -05:00
0e93a6435a chore: update to smoltcp 0.12 2024-12-01 15:22:37 -05:00
ca3590a4c0 chore: bump minor dependencies 2024-12-01 15:13:46 -05:00
784ab97c8b release: v0.3.9; add macos-aarch64 build 2024-12-01 12:41:23 -05:00
f3661c0a2c fix docker build 2024-12-01 12:33:56 -05:00
4fa8304799 bump MSRV to 1.78.0 2024-12-01 12:30:13 -05:00
1f3d9f035f release: v0.3.8 2024-12-01 12:28:13 -05:00
06049161ab bump MSRV to 1.74.0 2024-12-01 12:27:41 -05:00
e26cca089f
Merge pull request #64 from aramperes/fix/63 2024-12-01 12:08:24 -05:00
88ce124544 formatting 2024-12-01 12:03:51 -05:00
9ccd2e19f6 increase default smoltcp interface limit and add to README 2024-12-01 12:03:41 -05:00
c86784ed70 log a better error regarding smoltcp max interface limit 2024-12-01 11:33:53 -05:00
e25c88410e
Merge pull request #61 from PeterDaveHelloKitchen/OptimizeDockerfile 2024-04-07 20:38:08 -04:00
Peter Dave Hello
2b6d21572e
Optimize apt-get commands to reduce image size in Dockerfile
This commit improves the Dockerfile by consolidating apt-get update and apt-get install commands into a single RUN statement and adding cleanup steps for the apt cache.
2024-04-07 01:32:37 +08:00
56c950d159 Use bail when possible 2023-12-24 15:23:12 -05:00
ce40f85efa Cleanup usage of anyhow with_context 2023-12-24 15:06:22 -05:00
3ccd000ea8 Minor dependency updates 2023-12-24 14:58:51 -05:00
5fd28164b5
Merge pull request #60 from aramperes/patch/boringtun-0.6 2023-12-24 14:45:45 -05:00
1d703facc0 Implement locking of Tunn in WireGuardTunnel 2023-12-24 14:42:34 -05:00
e23cfc3e7e Update to new x25519 primitives 2023-12-24 11:52:07 -05:00
0931ed496a update boringtun to 0.6.0 2023-12-24 11:51:28 -05:00
91e6c79832
Merge pull request #59 from aramperes/patch/smoltcp-0.11 2023-12-24 11:31:00 -05:00
72ab679142 update to smoltcp 0.11 2023-12-24 11:28:15 -05:00
10b88ccc60 cleanup: SockSet can be owned by static
ref: https://github.com/smoltcp-rs/smoltcp/pull/813
2023-12-24 11:23:58 -05:00
83beb48b07 release: v0.3.7 2023-12-23 21:59:55 -05:00
21fe78f540 Add docs/example for SOCKS proxy 2023-12-23 21:44:25 -05:00
c3b752678e
Merge pull request #58 from aramperes/patch/smoltcp-0.10 2023-12-23 21:23:41 -05:00
32f189e53a Revert virtual port for server socket 2023-12-23 21:01:35 -05:00
488a0e0807 remove AnyIP; fix IPv6 virtual addresses 2023-12-23 21:01:00 -05:00
ssrlive
38fc217a29 smoltcp version 0.10 applied 2023-12-23 20:22:01 -05:00
ae15b4203c release: v0.3.6 2023-12-21 15:34:08 -05:00
992e55bf91
Merge pull request #55 from lu-zero/update-clap 2023-12-21 15:31:27 -05:00
a100f90a92 chore: update MSRV to 1.70.0 2023-12-21 15:29:43 -05:00
Luca Barbato
1613d2bb5c Bump clap version 2023-12-21 15:24:44 -05:00
Luca Barbato
29fb98f02b Update deps 2023-12-21 15:24:44 -05:00
767b83d241
Merge pull request #57 from aramperes/chore/update-docker-rust-version 2023-12-21 15:21:58 -05:00
731218d943 Fix new clippy warnings 2023-12-21 15:16:57 -05:00
7200cc07e7 chore: update MSRV to 1.65.0 2023-12-20 17:31:39 -05:00
Marco Nalon
1997ae7ea8 chore: update Dockerfile rust version 1.63.0 -> 1.65.0 2023-12-20 16:59:27 +01:00
9f53198f17 Remove $ from README examples 2023-10-02 19:55:24 -04:00
998d1cfc8d Add maintenance disclaimer 2023-10-02 19:51:18 -04:00
0a06df59f9 Update copyright year 2023-10-02 18:17:52 -04:00
61da97f4aa Update release action to latest Ubuntu 2023-10-02 18:05:30 -04:00
c5e803192f Disable macos package install 2023-10-02 17:57:15 -04:00
2896a4fcdb Update dependencies and bump MSRV to 1.63 2023-10-02 17:55:05 -04:00
07e895c770 release: v0.3.5 2023-10-02 17:37:18 -04:00
c0d0a5cb02
Merge pull request #51 from george-hopkins/psk 2023-10-02 17:29:15 -04:00
1333ea8a7c Rename option to --preshared-key and add to README 2023-10-02 17:21:32 -04:00
6f143280d1 Pin older version of base64 for now 2023-10-02 17:07:37 -04:00
George Hopkins
653c314409 Support pre-shared key 2023-10-02 16:24:37 +02:00
43a20ef6b3 Update dependencies 2023-01-12 02:53:56 -05:00
4f935c5a2d reorder dep 2023-01-12 02:38:36 -05:00
5dc04d9614
Merge pull request #49 from aramperes/bytes 2023-01-12 01:46:41 -05:00
fa634a08dc Fix a clippy warning 2023-01-12 01:43:32 -05:00
76b6a6e346 Use bytes 2023-01-12 01:40:04 -05:00
e62b7d30fe release: v0.3.4 2022-09-25 17:29:04 -04:00
0553fce5c6 chore: bump msrv to 1.57 2022-09-25 17:24:03 -04:00
6c64531940 chore: update dependencies 2022-09-25 17:19:16 -04:00
77981371fc
Merge pull request #45 from TitanNano/issues/44 2022-09-25 16:55:24 -04:00
Jovan Gerodetti
fbc76e3fb0 Handle WireGuardError::ConnectionExpired #44 2022-09-25 22:34:03 +02:00
85195d8aba
Merge pull request #41 from samhug/stackoverflow
Fix stack overflow on windows
2022-08-20 09:17:14 -04:00
eb9b8ff15d
Merge pull request #42 from kianmeng/fix-typos-and-markdowns 2022-08-12 09:53:09 -04:00
Kian-Meng Ang
074e1b430c Fix typos and markdowns
Found via these commands:

    codespell -L crate
    markdownlint -f README.md --disable MD013 MD033 MD041
2022-08-12 18:45:14 +08:00
Sam Hug
cea343c2c9 heap alloc WireGuardTunnel::consume_task() future 2022-08-11 15:32:24 -07:00
aef90a5c0c Add --endpoint-bind-addr to README 2022-07-18 20:41:48 -04:00
b78cab58ee release: v0.3.3 2022-06-25 15:05:10 -04:00
8cee210ccb Expose boringtun x25519 primitives 2022-06-25 14:38:08 -04:00
96be421495 Increase MSRV to 1.56.1 2022-06-25 14:03:18 -04:00
c09a541788 Update dependencies 2022-06-25 13:55:26 -04:00
371a55bb71 release: 0.3.2 2022-06-25 11:15:16 -04:00
00b45f8cb4 Update to Edition 2021 and fix docker build 2022-06-25 11:12:23 -04:00
75bad318f4 release: v0.3.1 2022-06-25 10:54:47 -04:00
8c1bdb1700 Minimize tokio features 2022-06-25 10:49:37 -04:00
1a560434d4 Fix cargo check action 2022-06-25 10:39:44 -04:00
48eaf0f840 Allow onetun to be used as a library 2022-06-25 10:33:37 -04:00
f85692950f Split dependencies that are only used for the binary version of onetun 2022-06-24 01:45:31 -04:00
1c1399d5ff
Merge pull request #34 from SideStore/host-address-binding 2022-06-24 01:22:14 -04:00
Jackson Coxson
4162f62ae6 Change the error message from host to bind 2022-06-23 23:14:57 -06:00
Jackson Coxson
9bd7ec2cca Simplify IP version detection 2022-06-23 23:11:45 -06:00
Jackson Coxson
1680b17c47 Correct binding terminology IP version detection 2022-06-23 23:10:48 -06:00
Jackson Coxson
96e18edd19 Invert logic for IP version mismatch 2022-06-23 23:10:11 -06:00
a81f5fe5e6 Simplify README intro 2022-06-24 01:01:53 -04:00
Jackson Coxson
c647bc9a96 Rename host_addr to endpoint_bind_addr 2022-06-23 23:01:32 -06:00
14df68ecc9 Simplify README intro 2022-06-24 01:01:26 -04:00
Jackson Coxson
3ab108ad04 Move host address resolution logic to config 2022-06-23 22:59:19 -06:00
c8a62debb1 Simplify README intro 2022-06-24 00:57:28 -04:00
Jackson Coxson
b108b5f404 Clarify help instructions for host binding 2022-06-23 22:47:33 -06:00
Jackson Coxson
5e94a0f31e Add host address binding option 2022-06-22 23:06:16 -06:00
73671a4d07 Add argument and env variable for remote port forwarding.
Part of #6
2022-06-21 18:38:55 -04:00
52aba0115d Fix new clippy lint 2022-03-27 17:14:18 -04:00
472a4df69f README adjustments 2022-03-27 14:15:25 -04:00
7ebf8e0737 release: v0.3.0 2022-02-15 02:01:31 -05:00
bcd840f838
Merge pull request #33 from aramperes/boringtun-0.4.0 2022-02-15 01:45:21 -05:00
a44b8b48eb Update README 2022-02-15 01:41:13 -05:00
93116fae26 Update boringtun to 0.4.0 2022-02-15 01:31:41 -05:00
648154b5ee Add tcpdump example 2022-01-10 01:32:55 -05:00
45962f4356 Update Architecture section in README 2022-01-10 01:25:56 -05:00
47c6c588d2 udp: remove extra socket iteration in virtual iface 2022-01-10 00:46:52 -05:00
782f5e74bf Apply TcpStream fix to UdpSocket as well 2022-01-10 00:35:14 -05:00
2b15e581f2 release: v0.2.9 2022-01-09 22:58:16 -05:00
e99fe6b8fb
Merge pull request #32 from aramperes/22-fix 2022-01-09 22:57:32 -05:00
11f86c49d6 Ensure all bytes are written to TcpStream
Fixes #22
2022-01-09 22:52:48 -05:00
def5f22d3c release: v0.2.8 2022-01-08 17:50:29 -05:00
e06b6526b7 Process more than one UDP socket per poll 2022-01-08 17:49:07 -05:00
3b296d66c5 release: v0.2.7 2022-01-08 17:40:29 -05:00
1aadea86d5
Merge pull request #30 from aramperes/pcap 2022-01-08 17:39:53 -05:00
ff0f5b967e Add optional IP packet capture for WireGuard tunnel 2022-01-08 17:30:10 -05:00
953bc18279 Remove some clippy suppressions for udp files 2022-01-08 15:20:21 -05:00
b3776c8b05 release: v0.2.6 2022-01-08 15:12:23 -05:00
d9bccb79e5 Process all TCP virtual client sockets in one poll 2022-01-08 15:11:59 -05:00
daa2362915 release: v0.2.5 2022-01-08 14:52:06 -05:00
025c001abb Remove event tracing when reading from bus 2022-01-08 14:48:04 -05:00
2b18bd4ec3 Remove unused dependency. Improve trace logging perf. 2022-01-08 14:41:12 -05:00
2e204d80fd release: v0.2.4 2022-01-08 03:44:10 -05:00
5b388f2ea3
Merge pull request #28 from aramperes/bus-based 2022-01-08 03:42:29 -05:00
abd9df6be4 Implement event-based UDP interface 2022-01-08 03:40:20 -05:00
51788c9557 Improve reliability using event-based synchronization 2022-01-08 02:18:51 -05:00
62b2641627 release: v0.2.3 2021-12-21 03:43:15 -05:00
7b6229ca1e
Merge pull request #25 from tilosp/mtu 2021-12-20 15:02:37 -05:00
3b4071c823
Merge pull request #26 from tilosp/ipv6 2021-12-20 15:01:35 -05:00
Tilo Spannagel
ce9cfce8fc Fix ipv6 endpoints 2021-12-20 17:39:01 +00:00
Tilo Spannagel
23af49dde5 Allow configuration of max transmission unit 2021-12-20 17:21:43 +00:00
81264916e5 release: v0.2.2 2021-12-20 02:27:18 -05:00
a1263f6d79
Merge pull request #24 from tilosp/update-deps 2021-12-20 02:23:59 -05:00
d51144b693 Formatting 2021-12-20 02:19:54 -05:00
Tilo Spannagel
f270e64a5c Disable not needed features in smoltcp 2021-12-20 02:31:48 +00:00
Tilo Spannagel
af803ffc6a Update to smoltcp 0.8.0 2021-12-20 02:16:40 +00:00
Tilo Spannagel
40d7c18c85 Update pretty_env_logger to 0.4.0 2021-12-20 01:42:33 +00:00
Tilo Spannagel
67610929bc Remove build-binary feature from boringtun 2021-12-20 01:41:02 +00:00
Tilo Spannagel
201d7c1ee6 Run cargo update 2021-12-20 01:39:23 +00:00
Tilo Spannagel
bc39fd6306 Pin smoltcp to commit 2021-12-20 01:28:58 +00:00
Tilo Spannagel
5ae1dca598 Pin boringtun to commit 2021-12-20 01:27:39 +00:00
ff97db3b44
Update README.md 2021-10-28 20:25:21 -04:00
c50fe8233a Wireguard -> WireGuard 2021-10-26 03:05:03 -04:00
83a5d01cc8 release: v0.2.1 2021-10-26 02:56:33 -04:00
23b651fb4b
Merge pull request #20 from aramperes/19-private-key-safety 2021-10-26 01:54:08 -05:00
75d6a0a11c Allow passing private key using file.
Adds warning about passing key directly in CLI.
On *nix systems, checks file permissions for warnings.

Fixes #19
2021-10-26 02:49:14 -04:00
7545c7a3a8 release: v0.2.0 2021-10-26 01:50:48 -04:00
1ed555a98c
Merge pull request #16 from aramperes/1-udp 2021-10-26 00:50:20 -05:00
4ecf16bc3f Update readme 2021-10-26 01:47:48 -04:00
1493feb184 Reduce udp client socket meta buffer 2021-10-26 01:20:02 -04:00
0da6fa51de udp: use tokio select instead of 1ms loop 2021-10-26 00:38:22 -04:00
faf157cfeb UDP port re-use during flooding 2021-10-26 00:03:44 -04:00
d975efefaf End-to-end UDP implementation
Port re-use still needs to be implemented to prevent exhaustion over
time, and flooding.
2021-10-25 19:05:42 -04:00
1bb1b448be
Merge pull request #18 from aramperes/logo 2021-10-20 21:49:38 -05:00
f40e1f8e53 Add logo
Co-authored-by: BorysSerbyn <borys.serbyn@gmail.com>
2021-10-20 22:48:35 -04:00
282d4f48eb Checkpoint 2021-10-20 19:04:56 -04:00
fb50ee7113 UDP virtual interface skeleton 2021-10-20 18:06:35 -04:00
cc91cce169 Basic UDP port pool 2021-10-20 16:49:24 -04:00
11c5ec99fd Replace lockfree with tokio::sync 2021-10-20 16:05:04 -04:00
5cec6d4943 Index ports with protocol in WG. Start writing UDP tunnel code with plans. 2021-10-19 01:55:04 -04:00
703f261344 Move TCP tunneling code to separate module 2021-10-19 01:00:05 -04:00
c2d0b9719a Refactor TCP virtual interface code out of main. Removed unused server socket buffer. 2021-10-19 00:43:59 -04:00
070c0f5162 Use Vec instead of static mut for socket storage. Update smoltcp to fix #17 2021-10-18 22:13:13 -04:00
dbced52070 Attempt reconnection in virtual client 2021-10-18 06:04:02 -04:00
ed835c47d3 Spawn tunnels in entirely separate threads 2021-10-18 03:54:13 -04:00
651ddaec49 Implement port-forwarder configuration parsing 2021-10-18 02:57:52 -04:00
cb09bb8857 WIP on UDP and multi-port-forward support 2021-10-17 21:57:45 -04:00
27 changed files with 3492 additions and 1401 deletions

.cargo/config.toml Normal file (+4)

@@ -0,0 +1,4 @@
[env]
# Each interface needs 1 IP allocated to the WireGuard peer IP.
# "8" = 7 tunnels per protocol.
SMOLTCP_IFACE_MAX_ADDR_COUNT = "8"

.github/ci/macos-install-packages

@@ -1,6 +1,6 @@
#!/bin/sh
brew install asciidoctor
# brew install asciidoctor
brew install openssl@1.1
cp /usr/local/opt/openssl@1.1/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/
# brew install openssl@1.1
# cp /usr/local/opt/openssl@1.1/lib/pkgconfig/*.pc /usr/local/lib/pkgconfig/

.github/dependabot.yml vendored Normal file (+10)

@@ -0,0 +1,10 @@
# Please see the documentation for all configuration options:
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
rebase-strategy: "disabled"

.github/onetun.png vendored Normal file (binary, 136 KiB)

.github/workflows/build.yml

@@ -10,7 +10,7 @@ jobs:
matrix:
rust:
- stable
- 1.55.0
- 1.80.0
steps:
- name: Checkout sources
uses: actions/checkout@v2
@@ -26,6 +26,12 @@ jobs:
with:
command: check
- name: Run cargo check without default features
uses: actions-rs/cargo@v1
with:
command: check
args: --no-default-features
test:
name: Test Suite
runs-on: ubuntu-latest
@@ -33,7 +39,7 @@ jobs:
matrix:
rust:
- stable
- 1.55.0
- 1.80.0
steps:
- name: Checkout sources
uses: actions/checkout@v2

.github/workflows/release.yml

@@ -61,7 +61,7 @@ jobs:
run: echo "${{ env.VERSION }}" > artifacts/release-version
- name: Upload artifacts
uses: actions/upload-artifact@v1
uses: actions/upload-artifact@v4
with:
name: artifacts
path: artifacts
@@ -75,20 +75,28 @@ jobs:
RUST_BACKTRACE: 1
strategy:
matrix:
build: [ linux-amd64, macos-intel, windows ]
build: [ linux-amd64, linux-aarch64, macos-aarch64, windows ]
include:
- build: linux-amd64
os: ubuntu-18.04
os: ubuntu-latest
rust: stable
target: x86_64-unknown-linux-musl
- build: macos-intel
cross: true
- build: linux-aarch64
os: ubuntu-latest
rust: stable
target: aarch64-unknown-linux-musl
cross: true
- build: macos-aarch64
os: macos-latest
rust: stable
target: x86_64-apple-darwin
target: aarch64-apple-darwin
cross: false
- build: windows
os: windows-2019
rust: stable
target: x86_64-pc-windows-msvc
cross: false
steps:
- name: Checkout repository
@@ -97,7 +105,7 @@ jobs:
fetch-depth: 1
- name: Install packages (Ubuntu)
if: matrix.os == 'ubuntu-18.04'
if: matrix.os == 'ubuntu-latest'
run: |
.github/ci/ubuntu-install-packages
- name: Install packages (macOS)
@@ -113,7 +121,7 @@ jobs:
target: ${{ matrix.target }}
- name: Get release download URL
uses: actions/download-artifact@v1
uses: actions/download-artifact@v4
with:
name: artifacts
path: artifacts
@@ -126,17 +134,24 @@ jobs:
echo "release upload url: $release_upload_url"
- name: Build onetun binary
run: cargo build --release
shell: bash
run: |
if [ "${{ matrix.cross }}" = "true" ]; then
cargo install cross
cross build --release --target ${{ matrix.target }}
else
cargo build --release --target ${{ matrix.target }}
fi
- name: Prepare onetun binary
shell: bash
run: |
mkdir -p ci/assets
if [ "${{ matrix.build }}" = "windows" ]; then
cp "target/release/onetun.exe" "ci/assets/onetun.exe"
cp "target/${{ matrix.target }}/release/onetun.exe" "ci/assets/onetun.exe"
echo "ASSET=onetun.exe" >> $GITHUB_ENV
else
cp "target/release/onetun" "ci/assets/onetun-${{ matrix.build }}"
cp "target/${{ matrix.target }}/release/onetun" "ci/assets/onetun-${{ matrix.build }}"
echo "ASSET=onetun-${{ matrix.build }}" >> $GITHUB_ENV
fi

.gitignore vendored (+2)

@@ -2,3 +2,5 @@
/.idea
.envrc
*.log
*.pcap
.DS_Store

Cargo.lock generated (1308 lines changed; diff too large to display)

Cargo.toml

@@ -1,18 +1,48 @@
[package]
name = "onetun"
version = "0.1.11"
edition = "2018"
version = "0.3.10"
edition = "2021"
license = "MIT"
description = "A cross-platform, user-space WireGuard port-forwarder that requires no system network configurations."
authors = ["Aram Peres <aram.peres@gmail.com>"]
repository = "https://github.com/aramperes/onetun"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
boringtun = { git = "https://github.com/cloudflare/boringtun", branch = "master" }
clap = { version = "2.33", default-features = false, features = ["suggestions"] }
# Required dependencies (bin and lib)
boringtun = { version = "0.6.0", default-features = false }
log = "0.4"
pretty_env_logger = "0.3"
anyhow = "1"
smoltcp = { git = "https://github.com/smoltcp-rs/smoltcp", branch = "master" }
tokio = { version = "1", features = ["full"] }
lockfree = "0.5.1"
futures = "0.3.17"
rand = "0.8.4"
tokio = { version = "1", features = [ "rt", "sync", "io-util", "net", "time", "fs", "macros" ] }
futures = "0.3"
rand = "0.8"
nom = "7"
async-trait = "0.1"
priority-queue = "2.1"
smoltcp = { version = "0.12", default-features = false, features = [
"std",
"log",
"medium-ip",
"proto-ipv4",
"proto-ipv6",
"socket-udp",
"socket-tcp",
] }
bytes = "1"
base64 = "0.13"
# forward boringtun's tracing events to log
tracing = { version = "0.1", default-features = false, features = ["log"] }
# bin-only dependencies
clap = { version = "4.4.11", default-features = false, features = ["suggestions", "std", "env", "help", "wrap_help"], optional = true }
pretty_env_logger = { version = "0.5", optional = true }
async-recursion = "1.0"
[features]
pcap = []
default = [ "bin" ]
bin = [ "clap", "pretty_env_logger", "pcap", "tokio/rt-multi-thread" ]
[lib]

Dockerfile

@@ -1,10 +1,11 @@
FROM rust:1.55 as cargo-build
FROM rust:1.82.0 as cargo-build
WORKDIR /usr/src/onetun
COPY Cargo.toml Cargo.toml
# Placeholder to download dependencies and cache them using layering
RUN mkdir src/
RUN touch src/lib.rs
RUN echo "fn main() {println!(\"if you see this, the build broke\")}" > src/main.rs
RUN cargo build --release
RUN rm -f target/x86_64-unknown-linux-musl/release/deps/myapp*
@@ -14,8 +15,9 @@ COPY . .
RUN cargo build --release
FROM debian:11-slim
RUN apt-get update
RUN apt-get install dumb-init -y
RUN apt-get update \
&& apt-get install dumb-init -y \
&& rm -rf /var/lib/apt/lists/*
COPY --from=cargo-build /usr/src/onetun/target/release/onetun /usr/local/bin/onetun

LICENSE

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021 Aram Peres
Copyright (c) 2025 Aram Peres
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

README.md (255 lines changed)

@@ -1,37 +1,67 @@
<img align="right" alt="onetun" width="150" src=".github/onetun.png">
# onetun
A cross-platform, user-space WireGuard port-forwarder that requires no system network configurations.
A cross-platform, user-space WireGuard port-forwarder that requires **no root-access or system network configurations**.
[![crates.io](https://img.shields.io/crates/v/onetun.svg)](https://crates.io/crates/onetun)
[![MIT licensed](https://img.shields.io/crates/l/onetun.svg)](./LICENSE)
[![Build status](https://github.com/aramperes/onetun/actions/workflows/build.yml/badge.svg)](https://github.com/aramperes/onetun/actions)
[![Latest Release](https://img.shields.io/github/v/tag/aramperes/onetun?label=release)](https://github.com/aramperes/onetun/releases/latest)
## Use-case
- You have an existing WireGuard endpoint (router), accessible using its UDP endpoint (typically port 51820); and
- You have a peer on the WireGuard network, running a TCP server on a port accessible to the WireGuard network; and
- You want to access this TCP service from a second computer, on which you can't install WireGuard because you
can't (no root access) or don't want to (polluting OS configs).
Access TCP or UDP services running on your WireGuard network, from devices that don't have WireGuard installed.
For example, this can be useful to forward a port from a Kubernetes cluster to a server behind WireGuard,
without needing to install WireGuard in a Pod.
For example,
- Personal or shared computers where you can't install WireGuard (root)
- IoT and mobile devices
- Root-less containers
## Download
onetun is available to install from [crates.io](https://crates.io/crates/onetun) with Rust ≥1.80.0:
```shell
cargo install onetun
```
You can also download the binary for Windows, macOS (Apple Silicon), and Linux (amd64, arm64) from
the [Releases](https://github.com/aramperes/onetun/releases) page.
You can also run onetun using [Docker](https://hub.docker.com/r/aramperes/onetun):
```shell
docker run --rm --name onetun --user 1000 -p 8080:8080 aramperes/onetun \
0.0.0.0:8080:192.168.4.2:8080 [...options...]
```
You can also build onetun locally, using Rust ≥1.80.0:
```shell
git clone https://github.com/aramperes/onetun && cd onetun
cargo build --release
./target/release/onetun
```
## Usage
**onetun** opens a TCP port on your local system, from which traffic is forwarded to a TCP port on a peer in your
**onetun** opens a TCP or UDP port on your local system, from which traffic is forwarded to a port on a peer in your
WireGuard network. It requires no changes to your operating system's network interfaces: you don't need to have `root`
access, or install any WireGuard tool on your local system for it to work.
The only prerequisite is to register a peer IP and public key on the remote WireGuard endpoint; those are necessary for
the WireGuard endpoint to trust the onetun peer and for packets to be routed.
```
./onetun <SOURCE_ADDR> <DESTINATION_ADDR> \
```shell
onetun [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...] [...] \
--endpoint-addr <public WireGuard endpoint address> \
--endpoint-public-key <the public key of the peer on the endpoint> \
--private-key <private key assigned to onetun> \
--source-peer-ip <IP assigned to onetun> \
--keep-alive <optional persistent keep-alive in seconds> \
--log <optional log level, defaults to "info"
--log <optional log level, defaults to "info">
```
> Note: you can use environment variables for all of these flags. Use `onetun --help` for details.
@@ -40,7 +70,7 @@ the WireGuard endpoint to trust the onetun peer and for packets to be routed.
Suppose your WireGuard endpoint has the following configuration, and is accessible from `140.30.3.182:51820`:
```
```shell
# /etc/wireguard/wg0.conf
[Interface]
@@ -63,7 +93,7 @@ We want to access a web server on the friendly peer (`192.168.4.2`) on port `808
local port, say `127.0.0.1:8080`, that will tunnel through WireGuard to reach the peer web server:
```shell
./onetun 127.0.0.1:8080 192.168.4.2:8080 \
onetun 127.0.0.1:8080:192.168.4.2:8080 \
--endpoint-addr 140.30.3.182:51820 \
--endpoint-public-key 'PUB_****************************************' \
--private-key 'PRIV_BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' \
@@ -73,58 +103,129 @@ local port, say `127.0.0.1:8080`, that will tunnel through WireGuard to reach th
You'll then see this log:
```
INFO onetun > Tunnelling [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```shell
INFO onetun > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Which means you can now access the port locally!
```
$ curl 127.0.0.1:8080
```shell
curl 127.0.0.1:8080
Hello world!
```
## Download
### Multiple tunnels in parallel
Normally I would publish `onetun` to crates.io. However, it depends on some features
in [smoltcp](https://github.com/smoltcp-rs/smoltcp) and
[boringtun](https://github.com/cloudflare/boringtun) that haven't been published yet, so I'm forced to use their Git
repos as dependencies for now.
In the meantime, you can download the binary for Windows, macOS (Intel), and Linux (amd64) from
the [Releases](https://github.com/aramperes/onetun/releases) page.
You can also run onetun using [Docker](https://hub.docker.com/r/aramperes/onetun):
**onetun** supports running multiple tunnels in parallel. For example:
```shell
docker run --rm --name onetun --user 1000 -p 8080:8080 aramperes/onetun \
0.0.0.0:8080 192.168.4.2:8080 [...options...]
onetun 127.0.0.1:8080:192.168.4.2:8080 127.0.0.1:8081:192.168.4.4:8081
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8081]->[192.168.4.4:8081] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
You can also build onetun locally, using Rust:
... would open TCP ports 8080 and 8081 locally, which forward to their respective ports on the different peers.
#### Maximum number of tunnels
`smoltcp` imposes a compile-time limit on the number of IP addresses assigned to an interface. **onetun** increases
the default value to support most use-cases. In effect, the default limit on the number of **onetun** peers
is **7 per protocol** (TCP and UDP).
Should you need more unique IP addresses to forward ports to, you can increase the limit in `.cargo/config.toml` and recompile **onetun**.
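For instance, a hypothetical `.cargo/config.toml` raising the limit to 15 tunnels per protocol (following the same arithmetic as the default shown above) would be:

```toml
[env]
# Hypothetical bump: "16" = 15 tunnels per protocol, since 1 IP is
# always reserved for the WireGuard peer IP.
SMOLTCP_IFACE_MAX_ADDR_COUNT = "16"
```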
### UDP Support
**onetun** supports UDP forwarding. You can add `:UDP` at the end of the port-forward configuration, or `UDP,TCP` to support
both protocols on the same port (note that this opens 2 separate tunnels, just on the same port).
```shell
$ git clone https://github.com/aramperes/onetun && cd onetun
$ cargo build --release
$ ./target/release/onetun
onetun 127.0.0.1:8080:192.168.4.2:8080:UDP
INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
onetun 127.0.0.1:8080:192.168.4.2:8080:UDP,TCP
INFO onetun::tunnel > Tunneling UDP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Note: UDP support is totally experimental. You should read the UDP portion of the **Architecture** section before using
it in any production capacity.
### IPv6 Support
**onetun** supports both IPv4 and IPv6. In fact, you can use onetun to forward some IP version to another, e.g. 6-to-4:
```shell
onetun [::1]:8080:192.168.4.2:8080
INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Note that each tunnel can only support one "source" IP version and one "destination" IP version. If you want to support
both IPv4 and IPv6 on the same port, you should create a second port-forward:
```shell
onetun [::1]:8080:192.168.4.2:8080 127.0.0.1:8080:192.168.4.2:8080
INFO onetun::tunnel > Tunneling TCP [[::1]:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
### Packet Capture
For debugging purposes, you can enable the capture of IP packets sent between onetun and the WireGuard peer.
The output is a libpcap capture file that can be viewed with Wireshark.
```shell
onetun --pcap wg.pcap 127.0.0.1:8080:192.168.4.2:8080
INFO onetun::pcap > Capturing WireGuard IP packets to wg.pcap
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:8080]->[192.168.4.2:8080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
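Under the hood, the capture is the classic libpcap file format. As a rough illustration (a minimal sketch, not onetun's actual pcap module), writing such a file using `LINKTYPE_RAW` (101, bare IP packets with no link-layer header) only takes two headers:

```rust
use std::io::{self, Write};
use std::time::{SystemTime, UNIX_EPOCH};

// LINKTYPE_RAW: each record is a bare IP packet, no link-layer header.
const LINKTYPE_RAW: u32 = 101;

/// Write the classic libpcap global header (little-endian).
fn write_pcap_header<W: Write>(w: &mut W) -> io::Result<()> {
    w.write_all(&0xa1b2_c3d4u32.to_le_bytes())?; // magic number
    w.write_all(&2u16.to_le_bytes())?; // version major
    w.write_all(&4u16.to_le_bytes())?; // version minor
    w.write_all(&0i32.to_le_bytes())?; // thiszone (GMT)
    w.write_all(&0u32.to_le_bytes())?; // sigfigs
    w.write_all(&65535u32.to_le_bytes())?; // snaplen
    w.write_all(&LINKTYPE_RAW.to_le_bytes()) // network (link type)
}

/// Append one decrypted IP packet, stamped with the current time.
fn write_pcap_record<W: Write>(w: &mut W, ip_packet: &[u8]) -> io::Result<()> {
    let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    w.write_all(&(now.as_secs() as u32).to_le_bytes())?; // ts_sec
    w.write_all(&now.subsec_micros().to_le_bytes())?; // ts_usec
    w.write_all(&(ip_packet.len() as u32).to_le_bytes())?; // incl_len
    w.write_all(&(ip_packet.len() as u32).to_le_bytes())?; // orig_len
    w.write_all(ip_packet)
}
```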
To capture packets sent to and from the onetun local port, you must use an external tool like `tcpdump` with root access:
```shell
sudo tcpdump -i lo -w local.pcap 'dst 127.0.0.1 && port 8080'
```
### WireGuard Options
By default, onetun will create the UDP socket to communicate with the WireGuard endpoint on all interfaces and on a dynamic port,
i.e. `0.0.0.0:0` for IPv4 endpoints, or `[::]:0` for IPv6.
You can bind to a static address instead using `--endpoint-bind-addr`:
```shell
onetun --endpoint-bind-addr 0.0.0.0:51820 --endpoint-addr 140.30.3.182:51820 [...]
```
The security of the WireGuard connection can be further enhanced with a **pre-shared key** (PSK). You can generate such a key with the `wg genpsk` command, and provide it using `--preshared-key`.
The peer must also have this key configured using the `PresharedKey` option.
```shell
onetun --preshared-key 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' [...]
```
## Architecture
**In short:** onetun uses [smoltcp's](https://github.com/smoltcp-rs/smoltcp) TCP/IP and UDP stack to generate IP packets
using its state machine ("virtual interface"). The generated IP packets are
encrypted by [boringtun](https://github.com/cloudflare/boringtun) and sent to the WireGuard endpoint. Encrypted IP packets received
from the WireGuard endpoint are decrypted using boringtun and sent through the smoltcp virtual interface state machine.
onetun creates "virtual sockets" in the virtual interface to forward data sent from inbound connections,
as well as to receive data from the virtual interface to forward back to the local client.
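To make the flow concrete, here is a minimal sketch (assumed names, not onetun's actual code) of the inbound half of that pump, using boringtun 0.6's `Tunn::decapsulate` on a UDP socket already connected to the WireGuard endpoint (`boringtun` and `log` as in the Cargo.toml above):

```rust
use std::net::UdpSocket;

use boringtun::noise::{Tunn, TunnResult};

/// Receive one datagram from the WireGuard endpoint and decrypt it.
/// Decrypted IP packets are what get fed into smoltcp's virtual interface.
fn pump_inbound(tunn: &mut Tunn, udp: &UdpSocket) -> std::io::Result<()> {
    let mut recv_buf = [0u8; 65535];
    let mut work_buf = [0u8; 65535];
    let n = udp.recv(&mut recv_buf)?;
    match tunn.decapsulate(None, &recv_buf[..n], &mut work_buf) {
        // Handshake or keepalive traffic that must go straight back out.
        TunnResult::WriteToNetwork(packet) => {
            udp.send(packet)?;
            // Drain any further packets boringtun has queued internally,
            // by repeating the call with an empty datagram until Done.
            let mut flush_buf = [0u8; 65535];
            while let TunnResult::WriteToNetwork(p) =
                tunn.decapsulate(None, &[], &mut flush_buf)
            {
                udp.send(p)?;
            }
        }
        // A decrypted IP packet: hand it to the virtual interface.
        TunnResult::WriteToTunnelV4(ip_packet, _) | TunnResult::WriteToTunnelV6(ip_packet, _) => {
            let _ = ip_packet; // onetun dispatches this into smoltcp here
        }
        TunnResult::Done => {}
        TunnResult::Err(e) => log::error!("WireGuard error: {:?}", e),
    }
    Ok(())
}
```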
---
onetun uses [tokio](https://github.com/tokio-rs/tokio), the async runtime, to listen for new TCP connections on the
given port.
When a client connects to the local TCP port, it uses [smoltcp](https://github.com/smoltcp-rs/smoltcp) to
create a "virtual interface", with a "virtual client" and a "virtual server" for the connection. These "virtual"
components are the crux of how onetun works. They essentially replace the host's TCP/IP stack with smoltcp's, which
fully runs inside onetun. An ephemeral "virtual port" is also assigned to the connection, in order to route packets
back to the right connection.
When a client connects to onetun's TCP port, a "virtual client" is
created in a [smoltcp](https://github.com/smoltcp-rs/smoltcp) "virtual" TCP/IP interface, which runs fully inside the onetun
process. An ephemeral "virtual port" is assigned to the "virtual client", which maps back to the local client.
When the real client opens the connection, the virtual client socket opens a TCP connection to the virtual server.
The virtual interface (implemented by smoltcp) in turn crafts the `SYN` segment and wraps it in an IP packet.
When the real client opens the connection, the virtual client socket opens a TCP connection to the virtual server
(a dummy socket bound to the remote host/port). The virtual interface in turn crafts the `SYN` segment and wraps it in an IP packet.
Because of how the virtual client and server are configured, the IP packet is crafted with a source address
being the configured `source-peer-ip` (`192.168.4.3` in the example above),
and the destination address is the remote peer's (`192.168.4.2`).
and the destination address matches the port-forward's configured destination (`192.168.4.2`).
By doing this, we let smoltcp handle the crafting of the IP packets, and the handling of the client's TCP states.
Instead of actually sending those packets to the virtual server,
@@ -135,8 +236,8 @@ Once the WireGuard endpoint receives an encrypted IP packet, it decrypts it usin
It reads the destination address, re-encrypts the IP packet using the matching peer's public key, and sends it off to
the peer's UDP endpoint.
The remote peer receives the encrypted IP and decrypts it. It can then read the inner payload (the TCP segment),
forward it to the server's port, which handles the TCP segment. The server responds with `SYN-ACK`, which goes back through
The peer receives the encrypted IP and decrypts it. It can then read the inner payload (the TCP segment),
forward it to the server's port, which handles the TCP segment. The TCP server responds with `SYN-ACK`, which goes back through
the peer's local WireGuard interface, gets encrypted, forwarded to the WireGuard endpoint, and then finally back to onetun's UDP port.
When onetun receives an encrypted packet from the WireGuard endpoint, it decrypts it using boringtun.
@@ -150,6 +251,72 @@ the virtual client to read it. When the virtual client reads data, it simply pus
This work is all made possible by [smoltcp](https://github.com/smoltcp-rs/smoltcp) and [boringtun](https://github.com/cloudflare/boringtun),
so special thanks to the developers of those libraries.
### UDP
UDP support is experimental. Since UDP messages are stateless, there is no perfect way for onetun to know when to release the
assigned virtual port back to the pool for a new peer to use. This would cause issues over time as running out of virtual ports
would mean new datagrams get dropped. To alleviate this, onetun will cap the number of ports used by one peer IP address;
if another datagram comes in from a different port but with the same IP, the least recently used virtual port will be freed and assigned
to the new peer port. At that point, any datagram packets destined for the reused virtual port will be routed to the new peer,
and any datagrams received by the old peer will be dropped.
In addition, in cases where many IPs are exhausting the UDP virtual port pool in tandem, and a totally new peer IP sends data,
onetun will have to pick the least recently used virtual port from _any_ peer IP and reuse it. However, this is only allowed
if the least recently used port hasn't been used for a certain amount of time. If all virtual ports are truly "active"
(with at least one transmission within that time limit), the new datagram gets dropped due to exhaustion.
All in all, I would not recommend using UDP forwarding for public services, since it's most likely prone to simple DoS or DDoS.
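The gist of that recycling policy can be sketched with standard collections (illustrative names and constants only; onetun's actual pool is built on the `priority-queue` crate):

```rust
use std::collections::HashMap;
use std::net::IpAddr;
use std::time::{Duration, Instant};

const MAX_PORTS_PER_IP: usize = 4; // illustrative cap
const MIN_IDLE_FOR_CROSS_IP_REUSE: Duration = Duration::from_secs(60); // illustrative

struct UdpPortPool {
    free: Vec<u16>,                          // never-assigned virtual ports
    in_use: HashMap<u16, (IpAddr, Instant)>, // port -> (peer IP, last activity)
}

impl UdpPortPool {
    fn new(ports: std::ops::RangeInclusive<u16>) -> Self {
        Self { free: ports.collect(), in_use: HashMap::new() }
    }

    /// Pick a virtual port for a datagram from `peer`, mirroring the policy above.
    fn assign(&mut self, peer: IpAddr) -> Option<u16> {
        let now = Instant::now();
        // 1. Prefer a never-used virtual port.
        if let Some(port) = self.free.pop() {
            self.in_use.insert(port, (peer, now));
            return Some(port);
        }
        // 2. If this peer IP is at its cap, recycle its own least recently used port.
        let mut held: Vec<(u16, Instant)> = self
            .in_use
            .iter()
            .filter(|&(_, &(ip, _))| ip == peer)
            .map(|(&port, &(_, last))| (port, last))
            .collect();
        if held.len() >= MAX_PORTS_PER_IP {
            held.sort_by_key(|&(_, last)| last);
            let (port, _) = held[0];
            self.in_use.insert(port, (peer, now));
            return Some(port);
        }
        // 3. Otherwise steal the least recently used port of *any* peer, but only
        //    if it has been idle long enough; else the datagram is dropped.
        let lru = self
            .in_use
            .iter()
            .min_by_key(|&(_, &(_, last))| last)
            .map(|(&port, &(_, last))| (port, last));
        match lru {
            Some((port, last)) if now.duration_since(last) >= MIN_IDLE_FOR_CROSS_IP_REUSE => {
                self.in_use.insert(port, (peer, now));
                Some(port)
            }
            _ => None, // true exhaustion: the new datagram is dropped
        }
    }
}
```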
## HTTP/SOCKS Proxy
**onetun** is a transport-layer proxy (also known as port forwarding); providing an HTTP/SOCKS proxy server is out of scope. However, you can easily chain **onetun** with a proxy server on a remote peer
that is locked down to your WireGuard network.
For example, you could run [dante-server](https://www.inet.no/dante/) on a peer (ex. `192.168.4.2`) with the following configuration:
```
# /etc/danted.conf
logoutput: syslog
user.privileged: root
user.unprivileged: nobody
internal: 192.168.4.2 port=1080
external: eth0
socksmethod: none
clientmethod: none
# Locks down proxy use to WireGuard peers (192.168.4.x)
client pass {
from: 192.168.4.0/24 to: 0.0.0.0/0
}
socks pass {
from: 192.168.4.0/24 to: 0.0.0.0/0
}
```
Then use **onetun** to expose the SOCKS5 proxy locally:
```shell
onetun 127.0.0.1:1080:192.168.4.2:1080
INFO onetun::tunnel > Tunneling TCP [127.0.0.1:1080]->[192.168.4.2:1080] (via [140.30.3.182:51820] as peer 192.168.4.3)
```
Test with `curl` (or configure your browser):
```shell
curl -x socks5://127.0.0.1:1080 https://ifconfig.me
```
## Contributing and Maintenance
I will gladly accept contributions to onetun, and set aside time to review all pull-requests.
Please consider opening a GitHub issue if you are unsure if your contribution is within the scope of the project.
**Disclaimer**: I do not have enough personal time to actively maintain onetun besides open-source contributions.
## License
MIT. See `LICENSE` for details.
MIT License. See `LICENSE` for details. Copyright &copy; 2025 Aram Peres.

src/config.rs

@@ -1,130 +1,339 @@
use std::collections::HashSet;
use std::convert::TryFrom;
use std::fmt::{Display, Formatter};
use std::fs::read_to_string;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use std::sync::Arc;
use anyhow::Context;
use boringtun::crypto::{X25519PublicKey, X25519SecretKey};
use clap::{App, Arg};
use anyhow::{bail, Context};
pub use boringtun::x25519::{PublicKey, StaticSecret};
#[derive(Clone, Debug)]
const DEFAULT_PORT_FORWARD_SOURCE: &str = "127.0.0.1";
#[derive(Clone)]
pub struct Config {
pub(crate) source_addr: SocketAddr,
pub(crate) dest_addr: SocketAddr,
pub(crate) private_key: Arc<X25519SecretKey>,
pub(crate) endpoint_public_key: Arc<X25519PublicKey>,
pub(crate) endpoint_addr: SocketAddr,
pub(crate) source_peer_ip: IpAddr,
pub(crate) keepalive_seconds: Option<u16>,
pub(crate) log: String,
pub port_forwards: Vec<PortForwardConfig>,
#[allow(dead_code)]
pub remote_port_forwards: Vec<PortForwardConfig>,
pub private_key: Arc<StaticSecret>,
pub endpoint_public_key: Arc<PublicKey>,
pub preshared_key: Option<[u8; 32]>,
pub endpoint_addr: SocketAddr,
pub endpoint_bind_addr: SocketAddr,
pub source_peer_ip: IpAddr,
pub keepalive_seconds: Option<u16>,
pub max_transmission_unit: usize,
pub log: String,
pub warnings: Vec<String>,
pub pcap_file: Option<String>,
}
impl Config {
#[cfg(feature = "bin")]
pub fn from_args() -> anyhow::Result<Self> {
let matches = App::new("onetun")
use clap::{Arg, Command};
let mut warnings = vec![];
let matches = Command::new("onetun")
.author("Aram Peres <aram.peres@gmail.com>")
.version(env!("CARGO_PKG_VERSION"))
.args(&[
Arg::with_name("SOURCE_ADDR")
.required(true)
.takes_value(true)
.env("ONETUN_SOURCE_ADDR")
.help("The source address (IP + port) to forward from. Example: 127.0.0.1:2115"),
Arg::with_name("DESTINATION_ADDR")
.required(true)
.takes_value(true)
.env("ONETUN_DESTINATION_ADDR")
.help("The destination address (IP + port) to forward to. The IP should be a peer registered in the Wireguard endpoint. Example: 192.168.4.2:2116"),
Arg::with_name("private-key")
.required(true)
.takes_value(true)
Arg::new("PORT_FORWARD")
.required(false)
.num_args(1..)
.help("Port forward configurations. The format of each argument is [src_host:]<src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \
where [src_host] is the local IP to listen on, <src_port> is the local port to listen on, <dst_host> is the remote peer IP to forward to, and <dst_port> is the remote port to forward to. \
Environment variables of the form 'ONETUN_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\
Examples:\n\
\t127.0.0.1:8080:192.168.4.1:8081:TCP,UDP\n\
\t127.0.0.1:8080:192.168.4.1:8081:TCP\n\
\t0.0.0.0:8080:192.168.4.1:8081\n\
\t[::1]:8080:192.168.4.1:8081\n\
\t8080:192.168.4.1:8081\n\
\t8080:192.168.4.1:8081:TCP\n\
\tlocalhost:8080:192.168.4.1:8081:TCP\n\
\tlocalhost:8080:peer.intranet:8081:TCP\
"),
Arg::new("private-key")
.conflicts_with("private-key-file")
.num_args(1)
.long("private-key")
.env("ONETUN_PRIVATE_KEY")
.help("The private key of this peer. The corresponding public key should be registered in the Wireguard endpoint."),
Arg::with_name("endpoint-public-key")
.help("The private key of this peer. The corresponding public key should be registered in the WireGuard endpoint. \
You can also use '--private-key-file' to specify a file containing the key instead."),
Arg::new("private-key-file")
.num_args(1)
.long("private-key-file")
.env("ONETUN_PRIVATE_KEY_FILE")
.help("The path to a file containing the private key of this peer. The corresponding public key should be registered in the WireGuard endpoint."),
Arg::new("endpoint-public-key")
.required(true)
.takes_value(true)
.num_args(1)
.long("endpoint-public-key")
.env("ONETUN_ENDPOINT_PUBLIC_KEY")
.help("The public key of the Wireguard endpoint (remote)."),
Arg::with_name("endpoint-addr")
.help("The public key of the WireGuard endpoint (remote)."),
Arg::new("preshared-key")
.required(false)
.num_args(1)
.long("preshared-key")
.env("ONETUN_PRESHARED_KEY")
.help("The pre-shared key (PSK) as configured with the peer."),
Arg::new("endpoint-addr")
.required(true)
.takes_value(true)
.num_args(1)
.long("endpoint-addr")
.env("ONETUN_ENDPOINT_ADDR")
.help("The address (IP + port) of the Wireguard endpoint (remote). Example: 1.2.3.4:51820"),
Arg::with_name("source-peer-ip")
.help("The address (IP + port) of the WireGuard endpoint (remote). Example: 1.2.3.4:51820"),
Arg::new("endpoint-bind-addr")
.required(false)
.num_args(1)
.long("endpoint-bind-addr")
.env("ONETUN_ENDPOINT_BIND_ADDR")
.help("The address (IP + port) used to bind the local UDP socket for the WireGuard tunnel. Example: 1.2.3.4:30000. Defaults to 0.0.0.0:0 for IPv4 endpoints, or [::]:0 for IPv6 endpoints."),
Arg::new("source-peer-ip")
.required(true)
.takes_value(true)
.num_args(1)
.long("source-peer-ip")
.env("ONETUN_SOURCE_PEER_IP")
.help("The source IP to identify this peer as (local). Example: 192.168.4.3"),
Arg::with_name("keep-alive")
Arg::new("keep-alive")
.required(false)
.takes_value(true)
.num_args(1)
.long("keep-alive")
.env("ONETUN_KEEP_ALIVE")
.help("Configures a persistent keep-alive for the WireGuard tunnel, in seconds."),
Arg::with_name("log")
Arg::new("max-transmission-unit")
.required(false)
.takes_value(true)
.num_args(1)
.long("max-transmission-unit")
.env("ONETUN_MTU")
.default_value("1420")
.help("Configures the max-transmission-unit (MTU) of the WireGuard tunnel."),
Arg::new("log")
.required(false)
.num_args(1)
.long("log")
.env("ONETUN_LOG")
.default_value("info")
.help("Configures the log level and format.")
.help("Configures the log level and format."),
Arg::new("pcap")
.required(false)
.num_args(1)
.long("pcap")
.env("ONETUN_PCAP")
.help("Decrypts and captures IP packets on the WireGuard tunnel to a given output file."),
Arg::new("remote")
.required(false)
.num_args(1..)
.long("remote")
.short('r')
.help("Remote port forward configurations. The format of each argument is <src_port>:<dst_host>:<dst_port>[:TCP,UDP,...], \
where <src_port> is the port the other peers will reach the server with, <dst_host> is the IP to forward to, and <dst_port> is the port to forward to. \
The <src_port> will be bound on onetun's peer IP, as specified by --source-peer-ip. If you pass a different value for <src_host> here, it will be rejected.\n\
Note: <dst_host>:<dst_port> must be reachable by onetun. If referring to another WireGuard peer, use --bridge instead (not supported yet).\n\
Environment variables of the form 'ONETUN_REMOTE_PORT_FORWARD_[#]' are also accepted, where [#] starts at 1.\n\
Examples:\n\
\t--remote 8080:localhost:8081:TCP,UDP\n\
\t--remote 8080:[::1]:8081:TCP\n\
\t--remote 8080:google.com:80\
"),
]).get_matches();
// Combine `PORT_FORWARD` arg and `ONETUN_PORT_FORWARD_#` envs
let mut port_forward_strings = HashSet::new();
if let Some(values) = matches.get_many::<String>("PORT_FORWARD") {
for value in values {
port_forward_strings.insert(value.to_owned());
}
}
for n in 1.. {
if let Ok(env) = std::env::var(format!("ONETUN_PORT_FORWARD_{}", n)) {
port_forward_strings.insert(env);
} else {
break;
}
}
// Parse `PORT_FORWARD` strings into `PortForwardConfig`
let port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> = port_forward_strings
.into_iter()
.map(|s| PortForwardConfig::from_notation(&s, DEFAULT_PORT_FORWARD_SOURCE))
.collect();
let port_forwards: Vec<PortForwardConfig> = port_forwards
.context("Failed to parse port forward config")?
.into_iter()
.flatten()
.collect();
// Read source-peer-ip
let source_peer_ip = parse_ip(matches.get_one::<String>("source-peer-ip"))
.context("Invalid source peer IP")?;
// Combine `remote` arg and `ONETUN_REMOTE_PORT_FORWARD_#` envs
let mut port_forward_strings = HashSet::new();
if let Some(values) = matches.get_many::<String>("remote") {
for value in values {
port_forward_strings.insert(value.to_owned());
}
}
for n in 1.. {
if let Ok(env) = std::env::var(format!("ONETUN_REMOTE_PORT_FORWARD_{}", n)) {
port_forward_strings.insert(env);
} else {
break;
}
}
// Parse `remote` strings into `PortForwardConfig`
let remote_port_forwards: anyhow::Result<Vec<Vec<PortForwardConfig>>> =
port_forward_strings
.into_iter()
.map(|s| {
PortForwardConfig::from_notation(
&s,
matches.get_one::<String>("source-peer-ip").unwrap(),
)
})
.collect();
let mut remote_port_forwards: Vec<PortForwardConfig> = remote_port_forwards
.context("Failed to parse remote port forward config")?
.into_iter()
.flatten()
.collect();
for port_forward in remote_port_forwards.iter_mut() {
if port_forward.source.ip() != source_peer_ip {
bail!("Remote port forward config <src_host> must match --source-peer-ip ({}), or be omitted.", source_peer_ip);
}
port_forward.source = SocketAddr::from((source_peer_ip, port_forward.source.port()));
port_forward.remote = true;
}
if port_forwards.is_empty() && remote_port_forwards.is_empty() {
bail!("No port forward configurations given.");
}
// Read private key from file or CLI argument
let (group_readable, world_readable) = matches
.get_one::<String>("private-key-file")
.and_then(is_file_insecurely_readable)
.unwrap_or_default();
if group_readable {
warnings.push("Private key file is group-readable. This is insecure.".into());
}
if world_readable {
warnings.push("Private key file is world-readable. This is insecure.".into());
}
let private_key = if let Some(private_key_file) =
matches.get_one::<String>("private-key-file")
{
read_to_string(private_key_file)
.map(|s| s.trim().to_string())
.context("Failed to read private key file")
} else {
if std::env::var("ONETUN_PRIVATE_KEY").is_err() {
warnings.push("Private key was passed using CLI. This is insecure. \
Use \"--private-key-file <file containing private key>\", or the \"ONETUN_PRIVATE_KEY\" env variable instead.".into());
}
matches
.get_one::<String>("private-key")
.cloned()
.context("Missing private key")
}?;
let endpoint_addr = parse_addr(matches.get_one::<String>("endpoint-addr"))
.context("Invalid endpoint address")?;
let endpoint_bind_addr = if let Some(addr) = matches.get_one::<String>("endpoint-bind-addr")
{
let addr = parse_addr(Some(addr)).context("Invalid bind address")?;
// Make sure the bind address and endpoint address are the same IP version
if addr.ip().is_ipv4() != endpoint_addr.ip().is_ipv4() {
bail!("Endpoint and bind addresses must be the same IP version");
}
addr
} else {
// Return the IP version of the endpoint address
match endpoint_addr {
SocketAddr::V4(_) => parse_addr(Some("0.0.0.0:0"))?,
SocketAddr::V6(_) => parse_addr(Some("[::]:0"))?,
}
};
Ok(Self {
source_addr: parse_addr(matches.value_of("SOURCE_ADDR"))
.with_context(|| "Invalid source address")?,
dest_addr: parse_addr(matches.value_of("DESTINATION_ADDR"))
.with_context(|| "Invalid destination address")?,
private_key: Arc::new(
parse_private_key(matches.value_of("private-key"))
.with_context(|| "Invalid private key")?,
),
port_forwards,
remote_port_forwards,
private_key: Arc::new(parse_private_key(&private_key).context("Invalid private key")?),
endpoint_public_key: Arc::new(
parse_public_key(matches.value_of("endpoint-public-key"))
.with_context(|| "Invalid endpoint public key")?,
parse_public_key(matches.get_one::<String>("endpoint-public-key"))
.context("Invalid endpoint public key")?,
),
endpoint_addr: parse_addr(matches.value_of("endpoint-addr"))
.with_context(|| "Invalid endpoint address")?,
source_peer_ip: parse_ip(matches.value_of("source-peer-ip"))
.with_context(|| "Invalid source peer IP")?,
keepalive_seconds: parse_keep_alive(matches.value_of("keep-alive"))
.with_context(|| "Invalid keep-alive value")?,
log: matches.value_of("log").unwrap_or_default().into(),
preshared_key: parse_preshared_key(matches.get_one::<String>("preshared-key"))?,
endpoint_addr,
endpoint_bind_addr,
source_peer_ip,
keepalive_seconds: parse_keep_alive(matches.get_one::<String>("keep-alive"))
.context("Invalid keep-alive value")?,
max_transmission_unit: parse_mtu(matches.get_one::<String>("max-transmission-unit"))
.context("Invalid max-transmission-unit value")?,
log: matches
.get_one::<String>("log")
.cloned()
.unwrap_or_default(),
pcap_file: matches.get_one::<String>("pcap").cloned(),
warnings,
})
}
}
fn parse_addr(s: Option<&str>) -> anyhow::Result<SocketAddr> {
s.with_context(|| "Missing address")?
fn parse_addr<T: AsRef<str>>(s: Option<T>) -> anyhow::Result<SocketAddr> {
s.context("Missing address")?
.as_ref()
.to_socket_addrs()
.with_context(|| "Invalid address")?
.context("Invalid address")?
.next()
.with_context(|| "Could not lookup address")
.context("Could not lookup address")
}
fn parse_ip(s: Option<&str>) -> anyhow::Result<IpAddr> {
s.with_context(|| "Missing IP")?
fn parse_ip(s: Option<&String>) -> anyhow::Result<IpAddr> {
s.context("Missing IP address")?
.parse::<IpAddr>()
.with_context(|| "Invalid IP address")
.context("Invalid IP address")
}
fn parse_private_key(s: Option<&str>) -> anyhow::Result<X25519SecretKey> {
s.with_context(|| "Missing private key")?
.parse::<X25519SecretKey>()
.map_err(|e| anyhow::anyhow!("{}", e))
.with_context(|| "Invalid private key")
fn parse_private_key(s: &str) -> anyhow::Result<StaticSecret> {
let decoded = base64::decode(s).context("Failed to decode private key")?;
if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(StaticSecret::from(bytes))
} else {
bail!("Invalid private key")
}
}
fn parse_public_key(s: Option<&str>) -> anyhow::Result<X25519PublicKey> {
s.with_context(|| "Missing public key")?
.parse::<X25519PublicKey>()
.map_err(|e| anyhow::anyhow!("{}", e))
.with_context(|| "Invalid public key")
fn parse_public_key(s: Option<&String>) -> anyhow::Result<PublicKey> {
let encoded = s.context("Missing public key")?;
let decoded = base64::decode(encoded).context("Failed to decode public key")?;
if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(PublicKey::from(bytes))
} else {
bail!("Invalid public key")
}
}
fn parse_keep_alive(s: Option<&str>) -> anyhow::Result<Option<u16>> {
fn parse_preshared_key(s: Option<&String>) -> anyhow::Result<Option<[u8; 32]>> {
if let Some(s) = s {
let decoded = base64::decode(s).context("Failed to decode preshared key")?;
if let Ok::<[u8; 32], _>(bytes) = decoded.try_into() {
Ok(Some(bytes))
} else {
bail!("Invalid preshared key")
}
} else {
Ok(None)
}
}
fn parse_keep_alive(s: Option<&String>) -> anyhow::Result<Option<u16>> {
if let Some(s) = s {
let parsed: u16 = s.parse().with_context(|| {
format!(
@@ -137,3 +346,369 @@ fn parse_keep_alive(s: Option<&str>) -> anyhow::Result<Option<u16>> {
Ok(None)
}
}
fn parse_mtu(s: Option<&String>) -> anyhow::Result<usize> {
s.context("Missing MTU")?.parse().context("Invalid MTU")
}
#[cfg(unix)]
fn is_file_insecurely_readable(path: &String) -> Option<(bool, bool)> {
use std::fs::File;
use std::os::unix::fs::MetadataExt;
let mode = File::open(path).ok()?.metadata().ok()?.mode();
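// Unix mode bits: 0o40 = group-read (S_IRGRP), 0o4 = world-read (S_IROTH).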
Some((mode & 0o40 > 0, mode & 0o4 > 0))
}
#[cfg(not(unix))]
fn is_file_insecurely_readable(_path: &String) -> Option<(bool, bool)> {
// No good way to determine permissions on non-Unix target
None
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub struct PortForwardConfig {
/// The source IP and port where the local server will run.
pub source: SocketAddr,
/// The destination IP and port to which traffic will be forwarded.
pub destination: SocketAddr,
/// The transport protocol to use for the port (Layer 4).
pub protocol: PortProtocol,
/// Whether this is a remote port forward.
pub remote: bool,
}
impl PortForwardConfig {
/// Converts a string representation into `PortForwardConfig`.
///
/// Sample formats:
/// - `127.0.0.1:8080:192.168.4.1:8081:TCP,UDP`
/// - `127.0.0.1:8080:192.168.4.1:8081:TCP`
/// - `0.0.0.0:8080:192.168.4.1:8081`
/// - `[::1]:8080:192.168.4.1:8081`
/// - `8080:192.168.4.1:8081`
/// - `8080:192.168.4.1:8081:TCP`
/// - `localhost:8080:192.168.4.1:8081:TCP`
/// - `localhost:8080:peer.intranet:8081:TCP`
///
/// Implementation Notes:
/// - The format is formalized as `[src_host:]<src_port>:<dst_host>:<dst_port>[:PROTO1,PROTO2,...]`
/// - `src_host` is optional and defaults to `127.0.0.1`.
/// - `src_host` and `dst_host` may be specified as IPv4, IPv6, or a FQDN to be resolved by DNS.
/// - IPv6 addresses must be prefixed with `[` and suffixed with `]`. Example: `[::1]`.
/// - Any `u16` is accepted as `src_port` and `dst_port`
/// - Specifying protocols (`PROTO1,PROTO2,...`) is optional and defaults to `TCP`. Values must be separated by commas.
pub fn from_notation(s: &str, default_source: &str) -> anyhow::Result<Vec<PortForwardConfig>> {
mod parsers {
use nom::branch::alt;
use nom::bytes::complete::is_not;
use nom::character::complete::{alpha1, char, digit1};
use nom::combinator::{complete, map, opt, success};
use nom::error::ErrorKind;
use nom::multi::separated_list1;
use nom::sequence::{delimited, preceded, separated_pair, tuple};
use nom::IResult;
fn ipv6(s: &str) -> IResult<&str, &str> {
delimited(char('['), is_not("]"), char(']'))(s)
}
fn ipv4_or_fqdn(s: &str) -> IResult<&str, &str> {
let s = is_not(":")(s)?;
if s.1.chars().all(|c| c.is_ascii_digit()) {
// If ipv4 or fqdn is all digits, it's not valid.
Err(nom::Err::Error(nom::error::ParseError::from_error_kind(
s.1,
ErrorKind::Fail,
)))
} else {
Ok(s)
}
}
fn port(s: &str) -> IResult<&str, &str> {
digit1(s)
}
fn ip_or_fqdn(s: &str) -> IResult<&str, &str> {
alt((ipv6, ipv4_or_fqdn))(s)
}
fn no_ip(s: &str) -> IResult<&str, Option<&str>> {
success(None)(s)
}
fn src_addr(s: &str) -> IResult<&str, (Option<&str>, &str)> {
let with_ip = separated_pair(map(ip_or_fqdn, Some), char(':'), port);
let without_ip = tuple((no_ip, port));
alt((with_ip, without_ip))(s)
}
fn dst_addr(s: &str) -> IResult<&str, (&str, &str)> {
separated_pair(ip_or_fqdn, char(':'), port)(s)
}
fn protocol(s: &str) -> IResult<&str, &str> {
alpha1(s)
}
fn protocols(s: &str) -> IResult<&str, Option<Vec<&str>>> {
opt(preceded(char(':'), separated_list1(char(','), protocol)))(s)
}
#[allow(clippy::type_complexity)]
pub fn port_forward(
s: &str,
) -> IResult<&str, ((Option<&str>, &str), (), (&str, &str), Option<Vec<&str>>)>
{
complete(tuple((
src_addr,
map(char(':'), |_| ()),
dst_addr,
protocols,
)))(s)
}
}
// TODO: Could improve error management with custom errors, so that the messages are more helpful.
let (src_addr, _, dst_addr, protocols) = parsers::port_forward(s)
.map_err(|e| anyhow::anyhow!("Invalid port-forward definition: {}", e))?
.1;
let source = (
src_addr.0.unwrap_or(default_source),
src_addr.1.parse::<u16>().context("Invalid source port")?,
)
.to_socket_addrs()
.context("Invalid source address")?
.next()
.context("Could not resolve source address")?;
let destination = (
dst_addr.0,
dst_addr.1.parse::<u16>().context("Invalid destination port")?,
)
.to_socket_addrs() // TODO: Pass this as given and use DNS config instead (issue #15)
.context("Invalid destination address")?
.next()
.context("Could not resolve destination address")?;
// Parse protocols
let protocols = if let Some(protocols) = protocols {
let protocols: anyhow::Result<Vec<PortProtocol>> =
protocols.into_iter().map(PortProtocol::try_from).collect();
protocols
} else {
Ok(vec![PortProtocol::Tcp])
}
.context("Failed to parse protocols")?;
// Returns a config for each protocol
Ok(protocols
.into_iter()
.map(|protocol| Self {
source,
destination,
protocol,
remote: false,
})
.collect())
}
}
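A short usage sketch of the notation above (it mirrors the tests below): one string listing two protocols expands into two configs.
// Sketch: parse a dual-protocol forward.
let forwards = PortForwardConfig::from_notation(
    "127.0.0.1:8080:192.168.4.1:8081:TCP,UDP",
    "127.0.0.1",
)
.expect("valid notation");
assert_eq!(forwards.len(), 2); // same source/destination, one TCP and one UDP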
impl Display for PortForwardConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
if self.remote {
write!(
f,
"(remote){}:{}:{}",
self.source, self.destination, self.protocol
)
} else {
write!(f, "{}:{}:{}", self.source, self.destination, self.protocol)
}
}
}
/// Layer 4 protocols for ports.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub enum PortProtocol {
/// TCP
Tcp,
/// UDP
Udp,
}
impl TryFrom<&str> for PortProtocol {
type Error = anyhow::Error;
fn try_from(value: &str) -> anyhow::Result<Self> {
match value.to_uppercase().as_str() {
"TCP" => Ok(Self::Tcp),
"UDP" => Ok(Self::Udp),
_ => Err(anyhow::anyhow!("Invalid protocol specifier: {}", value)),
}
}
}
impl Display for PortProtocol {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
Self::Tcp => "TCP",
Self::Udp => "UDP",
}
)
}
}
#[cfg(test)]
mod tests {
use std::str::FromStr;
use super::*;
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_1() {
assert_eq!(
PortForwardConfig::from_notation(
"192.168.0.1:8080:192.168.4.1:8081:TCP,UDP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![
PortForwardConfig {
source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
},
PortForwardConfig {
source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Udp,
remote: false,
}
]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_2() {
assert_eq!(
PortForwardConfig::from_notation(
"192.168.0.1:8080:192.168.4.1:8081:TCP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: SocketAddr::from_str("192.168.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_3() {
assert_eq!(
PortForwardConfig::from_notation(
"0.0.0.0:8080:192.168.4.1:8081",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: SocketAddr::from_str("0.0.0.0:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_4() {
assert_eq!(
PortForwardConfig::from_notation(
"[::1]:8080:192.168.4.1:8081",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: SocketAddr::from_str("[::1]:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_5() {
assert_eq!(
PortForwardConfig::from_notation("8080:192.168.4.1:8081", DEFAULT_PORT_FORWARD_SOURCE)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: SocketAddr::from_str("127.0.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_6() {
assert_eq!(
PortForwardConfig::from_notation(
"8080:192.168.4.1:8081:TCP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: SocketAddr::from_str("127.0.0.1:8080").unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_7() {
assert_eq!(
PortForwardConfig::from_notation(
"localhost:8080:192.168.4.1:8081",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(),
destination: SocketAddr::from_str("192.168.4.1:8081").unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
/// Tests the parsing of `PortForwardConfig`.
#[test]
fn test_parse_port_forward_config_8() {
assert_eq!(
PortForwardConfig::from_notation(
"localhost:8080:localhost:8081:TCP",
DEFAULT_PORT_FORWARD_SOURCE
)
.expect("Failed to parse"),
vec![PortForwardConfig {
source: "localhost:8080".to_socket_addrs().unwrap().next().unwrap(),
destination: "localhost:8081".to_socket_addrs().unwrap().next().unwrap(),
protocol: PortProtocol::Tcp,
remote: false,
}]
);
}
}

src/events.rs (new file, 190 lines)
@@ -0,0 +1,190 @@
use bytes::Bytes;
use std::fmt::{Display, Formatter};
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use crate::config::PortForwardConfig;
use crate::virtual_iface::VirtualPort;
use crate::PortProtocol;
/// Events that go on the bus between the local server, smoltcp, and WireGuard.
#[derive(Debug, Clone)]
pub enum Event {
/// Dumb event with no data.
Dumb,
/// A new connection with the local server was initiated, and the given virtual port was assigned.
ClientConnectionInitiated(PortForwardConfig, VirtualPort),
/// A connection was dropped from the pool and should be closed in all interfaces.
ClientConnectionDropped(VirtualPort),
/// Data received by the local server that should be sent to the virtual server.
LocalData(PortForwardConfig, VirtualPort, Bytes),
/// Data received by the remote server that should be sent to the local client.
RemoteData(VirtualPort, Bytes),
/// IP packet received from the WireGuard tunnel that should be passed through the corresponding virtual device.
InboundInternetPacket(PortProtocol, Bytes),
/// IP packet to be sent through the WireGuard tunnel as crafted by the virtual device.
OutboundInternetPacket(Bytes),
/// Notifies that a virtual device read an IP packet.
VirtualDeviceFed(PortProtocol),
}
impl Display for Event {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
Event::Dumb => {
write!(f, "Dumb{{}}")
}
Event::ClientConnectionInitiated(pf, vp) => {
write!(f, "ClientConnectionInitiated{{ pf={} vp={} }}", pf, vp)
}
Event::ClientConnectionDropped(vp) => {
write!(f, "ClientConnectionDropped{{ vp={} }}", vp)
}
Event::LocalData(pf, vp, data) => {
let size = data.len();
write!(f, "LocalData{{ pf={} vp={} size={} }}", pf, vp, size)
}
Event::RemoteData(vp, data) => {
let size = data.len();
write!(f, "RemoteData{{ vp={} size={} }}", vp, size)
}
Event::InboundInternetPacket(proto, data) => {
let size = data.len();
write!(
f,
"InboundInternetPacket{{ proto={} size={} }}",
proto, size
)
}
Event::OutboundInternetPacket(data) => {
let size = data.len();
write!(f, "OutboundInternetPacket{{ size={} }}", size)
}
Event::VirtualDeviceFed(proto) => {
write!(f, "VirtualDeviceFed{{ proto={} }}", proto)
}
}
}
}
#[derive(Clone)]
pub struct Bus {
counter: Arc<AtomicU32>,
bus: Arc<tokio::sync::broadcast::Sender<(u32, Event)>>,
}
impl Bus {
/// Creates a new event bus.
pub fn new() -> Self {
let (bus, _) = tokio::sync::broadcast::channel(1000);
let bus = Arc::new(bus);
let counter = Arc::new(AtomicU32::default());
Self { bus, counter }
}
/// Creates a new endpoint on the event bus.
pub fn new_endpoint(&self) -> BusEndpoint {
let id = self.counter.fetch_add(1, Ordering::Relaxed);
let tx = (*self.bus).clone();
let rx = self.bus.subscribe();
let tx = BusSender { id, tx };
BusEndpoint { id, tx, rx }
}
}
impl Default for Bus {
fn default() -> Self {
Self::new()
}
}
pub struct BusEndpoint {
id: u32,
tx: BusSender,
rx: tokio::sync::broadcast::Receiver<(u32, Event)>,
}
impl BusEndpoint {
/// Sends the event on the bus. Note that the messages sent by this endpoint won't reach itself.
pub fn send(&self, event: Event) {
self.tx.send(event)
}
/// Returns the unique sequential ID of this endpoint.
pub fn id(&self) -> u32 {
self.id
}
/// Awaits the next `Event` on the bus to be read.
pub async fn recv(&mut self) -> Event {
loop {
match self.rx.recv().await {
Ok((id, event)) => {
if id == self.id {
// If the event was sent by this endpoint, it is skipped
continue;
} else {
return event;
}
}
Err(_) => {
error!("Failed to read event bus from endpoint #{}", self.id);
return futures::future::pending().await;
}
}
}
}
/// Creates a new sender for this endpoint that can be cloned.
pub fn sender(&self) -> BusSender {
self.tx.clone()
}
}
#[derive(Clone)]
pub struct BusSender {
id: u32,
tx: tokio::sync::broadcast::Sender<(u32, Event)>,
}
impl BusSender {
/// Sends the event on the bus. Note that the messages sent by this endpoint won't reach itself.
pub fn send(&self, event: Event) {
trace!("#{} -> {}", self.id, event);
match self.tx.send((self.id, event)) {
Ok(_) => {}
Err(_) => error!("Failed to send event to bus from endpoint #{}", self.id),
}
}
}
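A small sketch of the endpoint/sender split (hypothetical): a cloneable BusSender can be moved into a producer task, and since it shares its endpoint's id, only other endpoints receive what it sends.
// Sketch: endpoint_b receives what endpoint_a's sender emits; endpoint_a skips it.
let bus = Bus::new();
let endpoint_a = bus.new_endpoint();
let mut endpoint_b = bus.new_endpoint();
let sender_a = endpoint_a.sender();
tokio::spawn(async move { sender_a.send(Event::Dumb) });
let event = endpoint_b.recv().await; // yields Event::Dumb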
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_bus() {
let bus = Bus::new();
let mut endpoint_1 = bus.new_endpoint();
let mut endpoint_2 = bus.new_endpoint();
let mut endpoint_3 = bus.new_endpoint();
assert_eq!(endpoint_1.id(), 0);
assert_eq!(endpoint_2.id(), 1);
assert_eq!(endpoint_3.id(), 2);
endpoint_1.send(Event::Dumb);
let recv_2 = endpoint_2.recv().await;
let recv_3 = endpoint_3.recv().await;
assert!(matches!(recv_2, Event::Dumb));
assert!(matches!(recv_3, Event::Dumb));
endpoint_2.send(Event::Dumb);
let recv_1 = endpoint_1.recv().await;
let recv_3 = endpoint_3.recv().await;
assert!(matches!(recv_1, Event::Dumb));
assert!(matches!(recv_3, Event::Dumb));
}
}

src/ip_sink.rs (deleted file)
@@ -1,35 +0,0 @@
use crate::virtual_device::VirtualIpDevice;
use crate::wg::WireGuardTunnel;
use smoltcp::iface::InterfaceBuilder;
use smoltcp::socket::SocketSet;
use std::sync::Arc;
use tokio::time::Duration;
/// A repeating task that processes unroutable IP packets.
pub async fn run_ip_sink_interface(wg: Arc<WireGuardTunnel>) -> ! {
// Initialize interface
let device = VirtualIpDevice::new_sink(wg)
.await
.expect("Failed to initialize VirtualIpDevice for sink interface");
// No sockets on sink interface
let mut socket_set_entries: [_; 0] = Default::default();
let mut socket_set = SocketSet::new(&mut socket_set_entries[..]);
let mut virtual_interface = InterfaceBuilder::new(device).ip_addrs([]).finalize();
loop {
let loop_start = smoltcp::time::Instant::now();
match virtual_interface.poll(&mut socket_set, loop_start) {
Ok(processed) if processed => {
trace!("[SINK] Virtual interface polled some packets to be processed",);
tokio::time::sleep(Duration::from_millis(1)).await;
}
Err(e) => {
error!("[SINK] Virtual interface poll error: {:?}", e);
}
_ => {
tokio::time::sleep(Duration::from_millis(5)).await;
}
}
}
}

src/lib.rs (new file, 122 lines)
@@ -0,0 +1,122 @@
#[macro_use]
extern crate log;
use std::sync::Arc;
use anyhow::Context;
use crate::config::{Config, PortProtocol};
use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::tcp::TcpVirtualInterface;
use crate::virtual_iface::udp::UdpVirtualInterface;
use crate::virtual_iface::VirtualInterfacePoll;
use crate::wg::WireGuardTunnel;
pub mod config;
pub mod events;
#[cfg(feature = "pcap")]
pub mod pcap;
pub mod tunnel;
pub mod virtual_device;
pub mod virtual_iface;
pub mod wg;
/// Starts the onetun tunnels in separate tokio tasks.
///
/// Note: This future completes immediately.
pub async fn start_tunnels(config: Config, bus: Bus) -> anyhow::Result<()> {
// Initialize the port pool for each protocol
let tcp_port_pool = TcpPortPool::new();
let udp_port_pool = UdpPortPool::new();
#[cfg(feature = "pcap")]
if let Some(pcap_file) = config.pcap_file.clone() {
// Start packet capture
let bus = bus.clone();
tokio::spawn(async move { pcap::capture(pcap_file, bus).await });
}
let wg = WireGuardTunnel::new(&config, bus.clone())
.await
.context("Failed to initialize WireGuard tunnel")?;
let wg = Arc::new(wg);
{
// Start routine task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.routine_task().await });
}
{
// Start consumption task for WireGuard
let wg = wg.clone();
tokio::spawn(Box::pin(async move { wg.consume_task().await }));
}
{
// Start production task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.produce_task().await });
}
if config
.port_forwards
.iter()
.any(|pf| pf.protocol == PortProtocol::Tcp)
{
// TCP device
let bus = bus.clone();
let device =
VirtualIpDevice::new(PortProtocol::Tcp, bus.clone(), config.max_transmission_unit);
// Start TCP Virtual Interface
let port_forwards = config.port_forwards.clone();
let iface = TcpVirtualInterface::new(port_forwards, bus, config.source_peer_ip);
tokio::spawn(async move { iface.poll_loop(device).await });
}
if config
.port_forwards
.iter()
.any(|pf| pf.protocol == PortProtocol::Udp)
{
// UDP device
let bus = bus.clone();
let device =
VirtualIpDevice::new(PortProtocol::Udp, bus.clone(), config.max_transmission_unit);
// Start UDP Virtual Interface
let port_forwards = config.port_forwards.clone();
let iface = UdpVirtualInterface::new(port_forwards, bus, config.source_peer_ip);
tokio::spawn(async move { iface.poll_loop(device).await });
}
{
let port_forwards = config.port_forwards;
let source_peer_ip = config.source_peer_ip;
port_forwards
.into_iter()
.map(|pf| {
(
pf,
wg.clone(),
tcp_port_pool.clone(),
udp_port_pool.clone(),
bus.clone(),
)
})
.for_each(move |(pf, wg, tcp_port_pool, udp_port_pool, bus)| {
tokio::spawn(async move {
tunnel::port_forward(pf, source_peer_ip, tcp_port_pool, udp_port_pool, wg, bus)
.await
.unwrap_or_else(|e| error!("Port-forward failed for {} : {}", pf, e))
});
});
}
Ok(())
}
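Mirroring the new src/main.rs below, embedding the library boils down to this sketch:
// Sketch: start the tunnels, then park forever while the spawned tasks do the work.
async fn run(config: onetun::config::Config) -> anyhow::Result<()> {
    let bus = onetun::events::Bus::default();
    onetun::start_tunnels(config, bus).await?; // returns immediately
    futures::future::pending().await
}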

src/main.rs
@@ -1,454 +1,36 @@
#[cfg(feature = "bin")]
#[macro_use]
extern crate log;
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use smoltcp::iface::InterfaceBuilder;
use smoltcp::socket::{SocketSet, TcpSocket, TcpSocketBuffer, TcpState};
use smoltcp::wire::{IpAddress, IpCidr};
use tokio::net::{TcpListener, TcpStream};
use crate::config::Config;
use crate::port_pool::PortPool;
use crate::virtual_device::VirtualIpDevice;
use crate::wg::WireGuardTunnel;
pub mod config;
pub mod ip_sink;
pub mod port_pool;
pub mod virtual_device;
pub mod wg;
pub const MAX_PACKET: usize = 65536;
#[cfg(feature = "bin")]
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let config = Config::from_args().with_context(|| "Failed to read config")?;
use anyhow::Context;
use onetun::{config::Config, events::Bus};
let config = Config::from_args().context("Configuration has errors")?;
init_logger(&config)?;
let port_pool = Arc::new(PortPool::new());
let wg = WireGuardTunnel::new(&config)
.await
.with_context(|| "Failed to initialize WireGuard tunnel")?;
let wg = Arc::new(wg);
{
// Start routine task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.routine_task().await });
for warning in &config.warnings {
warn!("{}", warning);
}
{
// Start consumption task for WireGuard
let wg = wg.clone();
tokio::spawn(async move { wg.consume_task().await });
let bus = Bus::default();
onetun::start_tunnels(config, bus).await?;
futures::future::pending().await
}
{
// Start IP sink task for incoming IP packets
let wg = wg.clone();
tokio::spawn(async move { ip_sink::run_ip_sink_interface(wg).await });
#[cfg(not(feature = "bin"))]
fn main() -> anyhow::Result<()> {
Err(anyhow::anyhow!("Binary compiled without 'bin' feature"))
}
info!(
"Tunnelling [{}]->[{}] (via [{}] as peer {})",
&config.source_addr, &config.dest_addr, &config.endpoint_addr, &config.source_peer_ip
);
#[cfg(feature = "bin")]
fn init_logger(config: &onetun::config::Config) -> anyhow::Result<()> {
use anyhow::Context;
tcp_proxy_server(
config.source_addr,
config.source_peer_ip,
config.dest_addr,
port_pool.clone(),
wg,
)
.await
}
/// Starts the server that listens on TCP connections.
async fn tcp_proxy_server(
listen_addr: SocketAddr,
source_peer_ip: IpAddr,
dest_addr: SocketAddr,
port_pool: Arc<PortPool>,
wg: Arc<WireGuardTunnel>,
) -> anyhow::Result<()> {
let listener = TcpListener::bind(listen_addr)
.await
.with_context(|| "Failed to listen on TCP proxy server")?;
loop {
let wg = wg.clone();
let port_pool = port_pool.clone();
let (socket, peer_addr) = listener
.accept()
.await
.with_context(|| "Failed to accept connection on TCP proxy server")?;
// Assign a 'virtual port': this is a unique port number used to route IP packets
// received from the WireGuard tunnel. It is the port number that the virtual client will
// listen on.
let virtual_port = match port_pool.next() {
Ok(port) => port,
Err(e) => {
error!(
"Failed to assign virtual port number for connection [{}]: {:?}",
peer_addr, e
);
continue;
}
};
info!("[{}] Incoming connection from {}", virtual_port, peer_addr);
tokio::spawn(async move {
let port_pool = Arc::clone(&port_pool);
let result = handle_tcp_proxy_connection(
socket,
virtual_port,
source_peer_ip,
dest_addr,
wg.clone(),
)
.await;
if let Err(e) = result {
error!(
"[{}] Connection dropped un-gracefully: {:?}",
virtual_port, e
);
} else {
info!("[{}] Connection closed by client", virtual_port);
}
// Release port when connection drops
wg.release_virtual_interface(virtual_port);
port_pool.release(virtual_port);
});
}
}
/// Handles a new TCP connection with its assigned virtual port.
async fn handle_tcp_proxy_connection(
socket: TcpStream,
virtual_port: u16,
source_peer_ip: IpAddr,
dest_addr: SocketAddr,
wg: Arc<WireGuardTunnel>,
) -> anyhow::Result<()> {
// Abort signal for stopping the Virtual Interface
let abort = Arc::new(AtomicBool::new(false));
// Signals that the Virtual Client is ready to send data
let (virtual_client_ready_tx, virtual_client_ready_rx) = tokio::sync::oneshot::channel::<()>();
// data_to_real_client_(tx/rx): This task reads the data from this mpsc channel to send back
// to the real client.
let (data_to_real_client_tx, mut data_to_real_client_rx) = tokio::sync::mpsc::channel(1_000);
// data_to_real_server_(tx/rx): This task sends the data received from the real client to the
// virtual interface (virtual server socket).
let (data_to_virtual_server_tx, data_to_virtual_server_rx) = tokio::sync::mpsc::channel(1_000);
// Spawn virtual interface
{
let abort = abort.clone();
tokio::spawn(async move {
virtual_tcp_interface(
virtual_port,
source_peer_ip,
dest_addr,
wg,
abort,
data_to_real_client_tx,
data_to_virtual_server_rx,
virtual_client_ready_tx,
)
.await
});
}
// Wait for virtual client to be ready.
virtual_client_ready_rx
.await
.expect("failed to wait for virtual client to be ready");
trace!("[{}] Virtual client is ready to send data", virtual_port);
loop {
tokio::select! {
readable_result = socket.readable() => {
match readable_result {
Ok(_) => {
// Buffer for the individual TCP segment.
let mut buffer = Vec::with_capacity(MAX_PACKET);
match socket.try_read_buf(&mut buffer) {
Ok(size) if size > 0 => {
let data = &buffer[..size];
debug!(
"[{}] Read {} bytes of TCP data from real client",
virtual_port, size
);
if let Err(e) = data_to_virtual_server_tx.send(data.to_vec()).await {
error!(
"[{}] Failed to dispatch data to virtual interface: {:?}",
virtual_port, e
);
}
}
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
error!(
"[{}] Failed to read from client TCP socket: {:?}",
virtual_port, e
);
break;
}
_ => {
break;
}
}
}
Err(e) => {
error!("[{}] Failed to check if readable: {:?}", virtual_port, e);
break;
}
}
}
data_recv_result = data_to_real_client_rx.recv() => {
match data_recv_result {
Some(data) => match socket.try_write(&data) {
Ok(size) => {
debug!(
"[{}] Wrote {} bytes of TCP data to real client",
virtual_port, size
);
}
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
if abort.load(Ordering::Relaxed) {
break;
} else {
continue;
}
}
Err(e) => {
error!(
"[{}] Failed to write to client TCP socket: {:?}",
virtual_port, e
);
}
},
None => {
if abort.load(Ordering::Relaxed) {
break;
} else {
continue;
}
},
}
}
}
}
trace!("[{}] TCP socket handler task terminated", virtual_port);
abort.store(true, Ordering::Relaxed);
Ok(())
}
#[allow(clippy::too_many_arguments)]
async fn virtual_tcp_interface(
virtual_port: u16,
source_peer_ip: IpAddr,
dest_addr: SocketAddr,
wg: Arc<WireGuardTunnel>,
abort: Arc<AtomicBool>,
data_to_real_client_tx: tokio::sync::mpsc::Sender<Vec<u8>>,
mut data_to_virtual_server_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
virtual_client_ready_tx: tokio::sync::oneshot::Sender<()>,
) -> anyhow::Result<()> {
let mut virtual_client_ready_tx = Some(virtual_client_ready_tx);
// Create a device and interface to simulate IP packets
// In essence:
// * TCP packets received from the 'real' client are 'sent' to the 'virtual server' via the 'virtual client'
// * Those TCP packets generate IP packets, which are captured from the interface and sent to the WireGuardTunnel
// * IP packets received by the WireGuardTunnel (from the endpoint) are fed into this 'virtual interface'
// * The interface processes those IP packets and routes them to the 'virtual client' (the rest is discarded)
// * The TCP data read by the 'virtual client' is sent to the 'real' TCP client
// Consumer for IP packets to send through the virtual interface
// Initialize the interface
let device = VirtualIpDevice::new(virtual_port, wg)
.with_context(|| "Failed to initialize VirtualIpDevice")?;
let mut virtual_interface = InterfaceBuilder::new(device)
.ip_addrs([
// Interface handles IP packets for the sender and recipient
IpCidr::new(IpAddress::from(source_peer_ip), 32),
IpCidr::new(IpAddress::from(dest_addr.ip()), 32),
])
.finalize();
// Server socket: this is a placeholder for the interface to route new connections to.
// TODO: Determine if we even need buffers here.
let server_socket: anyhow::Result<TcpSocket> = {
static mut TCP_SERVER_RX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
static mut TCP_SERVER_TX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
let tcp_rx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
let tcp_tx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
let mut socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
socket
.listen((IpAddress::from(dest_addr.ip()), dest_addr.port()))
.with_context(|| "Virtual server socket failed to listen")?;
Ok(socket)
};
let client_socket: anyhow::Result<TcpSocket> = {
static mut TCP_SERVER_RX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
static mut TCP_SERVER_TX_DATA: [u8; MAX_PACKET] = [0; MAX_PACKET];
let tcp_rx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
let tcp_tx_buffer = TcpSocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
let mut socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
socket
.connect(
(IpAddress::from(dest_addr.ip()), dest_addr.port()),
(IpAddress::from(source_peer_ip), virtual_port),
)
.with_context(|| "Virtual server socket failed to listen")?;
Ok(socket)
};
// Socket set: there are always 2 sockets: 1 virtual client and 1 virtual server.
let mut socket_set_entries: [_; 2] = Default::default();
let mut socket_set = SocketSet::new(&mut socket_set_entries[..]);
let _server_handle = socket_set.add(server_socket?);
let client_handle = socket_set.add(client_socket?);
// Any data that wasn't sent because it was over the sending buffer limit
let mut tx_extra = Vec::new();
loop {
let loop_start = smoltcp::time::Instant::now();
// Shutdown occurs when the real client closes the connection,
// or if the client was in a CLOSE-WAIT state (after a server FIN) and had no data to send anymore.
// One last poll-loop iteration is executed so that the RST segment can be dispatched.
let shutdown = abort.load(Ordering::Relaxed);
if shutdown {
// Shutdown: sends a RST packet.
trace!("[{}] Shutting down virtual interface", virtual_port);
let mut client_socket = socket_set.get::<TcpSocket>(client_handle);
client_socket.abort();
}
match virtual_interface.poll(&mut socket_set, loop_start) {
Ok(processed) if processed => {
trace!(
"[{}] Virtual interface polled some packets to be processed",
virtual_port
);
}
Err(e) => {
error!("[{}] Virtual interface poll error: {:?}", virtual_port, e);
}
_ => {}
}
{
let mut client_socket = socket_set.get::<TcpSocket>(client_handle);
if client_socket.can_recv() {
match client_socket.recv(|buffer| (buffer.len(), buffer.to_vec())) {
Ok(data) => {
trace!(
"[{}] Virtual client received {} bytes of data",
virtual_port,
data.len()
);
// Send it to the real client
if let Err(e) = data_to_real_client_tx.send(data).await {
error!("[{}] Failed to dispatch data from virtual client to real client: {:?}", virtual_port, e);
}
}
Err(e) => {
error!(
"[{}] Failed to read from virtual client socket: {:?}",
virtual_port, e
);
}
}
}
if client_socket.can_send() {
if let Some(virtual_client_ready_tx) = virtual_client_ready_tx.take() {
virtual_client_ready_tx
.send(())
.expect("Failed to notify real client that virtual client is ready");
}
let mut to_transfer = None;
if tx_extra.is_empty() {
// The payload segment from the previous loop is complete,
// we can now read the next payload in the queue.
if let Ok(data) = data_to_virtual_server_rx.try_recv() {
to_transfer = Some(data);
} else if client_socket.state() == TcpState::CloseWait {
// No data to be sent in this loop. If the client state is CLOSE-WAIT (because of a server FIN),
// the interface is shutdown.
trace!("[{}] Shutting down virtual interface because client sent no more data, and server sent FIN (CLOSE-WAIT)", virtual_port);
abort.store(true, Ordering::Relaxed);
continue;
}
}
let to_transfer_slice = to_transfer.as_ref().unwrap_or(&tx_extra).as_slice();
if !to_transfer_slice.is_empty() {
let total = to_transfer_slice.len();
match client_socket.send_slice(to_transfer_slice) {
Ok(sent) => {
trace!(
"[{}] Sent {}/{} bytes via virtual client socket",
virtual_port,
sent,
total,
);
tx_extra = Vec::from(&to_transfer_slice[sent..total]);
}
Err(e) => {
error!(
"[{}] Failed to send slice via virtual client socket: {:?}",
virtual_port, e
);
}
}
}
}
}
if shutdown {
break;
}
tokio::time::sleep(Duration::from_millis(1)).await;
}
trace!("[{}] Virtual interface task terminated", virtual_port);
abort.store(true, Ordering::Relaxed);
Ok(())
}
fn init_logger(config: &Config) -> anyhow::Result<()> {
let mut builder = pretty_env_logger::formatted_builder();
let mut builder = pretty_env_logger::formatted_timed_builder();
builder.parse_filters(&config.log);
builder
.try_init()
.with_context(|| "Failed to initialize logger")
builder.try_init().context("Failed to initialize logger")
}

src/pcap.rs (new file, 113 lines)
@@ -0,0 +1,113 @@
use crate::events::Event;
use crate::Bus;
use anyhow::Context;
use smoltcp::time::Instant;
use tokio::fs::File;
use tokio::io::{AsyncWriteExt, BufWriter};
struct Pcap {
writer: BufWriter<File>,
}
/// libpcap file writer
/// This is mostly taken from `smoltcp`, but rewritten to be async.
impl Pcap {
async fn flush(&mut self) -> anyhow::Result<()> {
self.writer
.flush()
.await
.context("Failed to flush pcap writer")
}
async fn write(&mut self, data: &[u8]) -> anyhow::Result<usize> {
self.writer
.write(data)
.await
.with_context(|| format!("Failed to write {} bytes to pcap writer", data.len()))
}
async fn write_u16(&mut self, value: u16) -> anyhow::Result<()> {
self.writer
.write_u16(value)
.await
.context("Failed to write u16 to pcap writer")
}
async fn write_u32(&mut self, value: u32) -> anyhow::Result<()> {
self.writer
.write_u32(value)
.await
.context("Failed to write u32 to pcap writer")
}
async fn global_header(&mut self) -> anyhow::Result<()> {
self.write_u32(0xa1b2c3d4).await?; // magic number
self.write_u16(2).await?; // major version
self.write_u16(4).await?; // minor version
self.write_u32(0).await?; // timezone (= UTC)
self.write_u32(0).await?; // accuracy (not used)
self.write_u32(65535).await?; // maximum packet length
self.write_u32(101).await?; // link-layer header type (101 = IP)
self.flush().await
}
async fn packet_header(&mut self, timestamp: Instant, length: usize) -> anyhow::Result<()> {
assert!(length <= 65535);
self.write_u32(timestamp.secs() as u32).await?; // timestamp seconds
self.write_u32(timestamp.micros() as u32).await?; // timestamp microseconds
self.write_u32(length as u32).await?; // captured length
self.write_u32(length as u32).await?; // original length
Ok(())
}
async fn packet(&mut self, timestamp: Instant, packet: &[u8]) -> anyhow::Result<()> {
self.packet_header(timestamp, packet.len())
.await
.context("Failed to write packet header to pcap writer")?;
self.write(packet)
.await
.context("Failed to write packet to pcap writer")?;
self.writer
.flush()
.await
.context("Failed to flush pcap writer")?;
self.flush().await
}
}
/// Listens on the event bus for IP packets sent from and to the WireGuard tunnel.
pub async fn capture(pcap_file: String, bus: Bus) -> anyhow::Result<()> {
let mut endpoint = bus.new_endpoint();
let file = File::create(&pcap_file)
.await
.context("Failed to create pcap file")?;
let writer = BufWriter::new(file);
let mut writer = Pcap { writer };
writer
.global_header()
.await
.context("Failed to write global header to pcap writer")?;
info!("Capturing WireGuard IP packets to {}", &pcap_file);
loop {
match endpoint.recv().await {
Event::InboundInternetPacket(_proto, ip) => {
let instant = Instant::now();
writer
.packet(instant, &ip)
.await
.context("Failed to write inbound IP packet to pcap writer")?;
}
Event::OutboundInternetPacket(ip) => {
let instant = Instant::now();
writer
.packet(instant, &ip)
.await
.context("Failed to write output IP packet to pcap writer")?;
}
_ => {}
}
}
}

src/port_pool.rs (deleted file)
@@ -1,62 +0,0 @@
use std::ops::Range;
use anyhow::Context;
use rand::seq::SliceRandom;
use rand::thread_rng;
const MIN_PORT: u16 = 32768;
const MAX_PORT: u16 = 60999;
const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
/// A pool of virtual ports available.
/// This structure is thread-safe and lock-free; you can use it safely in an `Arc`.
pub struct PortPool {
/// Remaining ports
inner: lockfree::queue::Queue<u16>,
/// Ports in use, with their associated IP channel sender.
taken: lockfree::set::Set<u16>,
}
impl Default for PortPool {
fn default() -> Self {
Self::new()
}
}
impl PortPool {
/// Initializes a new pool of virtual ports.
pub fn new() -> Self {
let inner = lockfree::queue::Queue::default();
let mut ports: Vec<u16> = PORT_RANGE.collect();
ports.shuffle(&mut thread_rng());
ports.into_iter().for_each(|p| inner.push(p) as ());
Self {
inner,
taken: lockfree::set::Set::new(),
}
}
/// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
pub fn next(&self) -> anyhow::Result<u16> {
let port = self
.inner
.pop()
.with_context(|| "Virtual port pool is exhausted")?;
self.taken
.insert(port)
.ok()
.with_context(|| "Failed to insert taken")?;
Ok(port)
}
/// Releases a port back into the pool.
pub fn release(&self, port: u16) {
self.inner.push(port);
self.taken.remove(&port);
}
/// Whether the given port is in use by a virtual interface.
pub fn is_in_use(&self, port: u16) -> bool {
self.taken.contains(&port)
}
}

src/tunnel/mod.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
use std::net::IpAddr;
use std::sync::Arc;
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::Bus;
use crate::tunnel::tcp::TcpPortPool;
use crate::tunnel::udp::UdpPortPool;
use crate::wg::WireGuardTunnel;
pub mod tcp;
pub mod udp;
pub async fn port_forward(
port_forward: PortForwardConfig,
source_peer_ip: IpAddr,
tcp_port_pool: TcpPortPool,
udp_port_pool: UdpPortPool,
wg: Arc<WireGuardTunnel>,
bus: Bus,
) -> anyhow::Result<()> {
info!(
"Tunneling {} [{}]->[{}] (via [{}] as peer {})",
port_forward.protocol,
port_forward.source,
port_forward.destination,
&wg.endpoint,
source_peer_ip
);
match port_forward.protocol {
PortProtocol::Tcp => tcp::tcp_proxy_server(port_forward, tcp_port_pool, bus).await,
PortProtocol::Udp => udp::udp_proxy_server(port_forward, udp_port_pool, bus).await,
}
}

src/tunnel/tcp.rs (new file, 211 lines)
@@ -0,0 +1,211 @@
use std::collections::VecDeque;
use std::ops::Range;
use std::sync::Arc;
use std::time::Duration;
use anyhow::Context;
use bytes::BytesMut;
use rand::seq::SliceRandom;
use rand::thread_rng;
use tokio::io::AsyncWriteExt;
use tokio::net::{TcpListener, TcpStream};
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::{Bus, Event};
use crate::virtual_iface::VirtualPort;
const MAX_PACKET: usize = 65536;
const MIN_PORT: u16 = 1000;
const MAX_PORT: u16 = 60999;
const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
/// Starts the server that listens on TCP connections.
pub async fn tcp_proxy_server(
port_forward: PortForwardConfig,
port_pool: TcpPortPool,
bus: Bus,
) -> anyhow::Result<()> {
let listener = TcpListener::bind(port_forward.source)
.await
.context("Failed to listen on TCP proxy server")?;
loop {
let port_pool = port_pool.clone();
let (socket, peer_addr) = listener
.accept()
.await
.context("Failed to accept connection on TCP proxy server")?;
// Assign a 'virtual port': this is a unique port number used to route IP packets
// received from the WireGuard tunnel. It is the port number that the virtual client will
// listen on.
let virtual_port = match port_pool.next().await {
Ok(port) => port,
Err(e) => {
error!(
"Failed to assign virtual port number for connection [{}]: {:?}",
peer_addr, e
);
continue;
}
};
info!("[{}] Incoming connection from {}", virtual_port, peer_addr);
let bus = bus.clone();
tokio::spawn(async move {
let port_pool = port_pool.clone();
let result = handle_tcp_proxy_connection(socket, virtual_port, port_forward, bus).await;
if let Err(e) = result {
error!(
"[{}] Connection dropped un-gracefully: {:?}",
virtual_port, e
);
} else {
info!("[{}] Connection closed by client", virtual_port);
}
tokio::time::sleep(Duration::from_millis(100)).await; // Make sure the other tasks have time to process the event
port_pool.release(virtual_port).await;
});
}
}
/// Handles a new TCP connection with its assigned virtual port.
async fn handle_tcp_proxy_connection(
mut socket: TcpStream,
virtual_port: VirtualPort,
port_forward: PortForwardConfig,
bus: Bus,
) -> anyhow::Result<()> {
let mut endpoint = bus.new_endpoint();
endpoint.send(Event::ClientConnectionInitiated(port_forward, virtual_port));
let mut buffer = BytesMut::with_capacity(MAX_PACKET);
loop {
tokio::select! {
readable_result = socket.readable() => {
match readable_result {
Ok(_) => {
match socket.try_read_buf(&mut buffer) {
Ok(size) if size > 0 => {
let data = Vec::from(&buffer[..size]);
endpoint.send(Event::LocalData(port_forward, virtual_port, data.into()));
// Reset buffer
buffer.clear();
}
Err(ref e) if e.kind() == std::io::ErrorKind::WouldBlock => {
continue;
}
Err(e) => {
error!(
"[{}] Failed to read from client TCP socket: {:?}",
virtual_port, e
);
break;
}
_ => {
break;
}
}
}
Err(e) => {
error!("[{}] Failed to check if readable: {:?}", virtual_port, e);
break;
}
}
}
event = endpoint.recv() => {
match event {
Event::ClientConnectionDropped(e_vp) if e_vp == virtual_port => {
// This connection is supposed to be closed, stop the task.
break;
}
Event::RemoteData(e_vp, data) if e_vp == virtual_port => {
// Have remote data to send to the local client
if let Err(e) = socket.writable().await {
error!("[{}] Failed to check if writable: {:?}", virtual_port, e);
}
let expected = data.len();
let mut sent = 0;
loop {
if sent >= expected {
break;
}
match socket.write(&data[sent..expected]).await {
Ok(written) => {
debug!("[{}] Sent {} (expected {}) bytes to local client", virtual_port, written, expected);
sent += written;
if sent < expected {
debug!("[{}] Will try to resend remaining {} bytes to local client", virtual_port, (expected - written));
}
},
Err(e) => {
error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, expected, e);
break;
}
}
}
}
_ => {}
}
}
}
}
// Notify other endpoints that this task has closed and no more data is to be sent to the local client
endpoint.send(Event::ClientConnectionDropped(virtual_port));
Ok(())
}
/// A pool of virtual ports available for TCP connections.
#[derive(Clone)]
pub struct TcpPortPool {
inner: Arc<tokio::sync::RwLock<TcpPortPoolInner>>,
}
impl Default for TcpPortPool {
fn default() -> Self {
Self::new()
}
}
impl TcpPortPool {
/// Initializes a new pool of virtual ports.
pub fn new() -> Self {
let mut inner = TcpPortPoolInner::default();
let mut ports: Vec<u16> = PORT_RANGE.collect();
ports.shuffle(&mut thread_rng());
ports
.into_iter()
.for_each(|p| inner.queue.push_back(p) as ());
Self {
inner: Arc::new(tokio::sync::RwLock::new(inner)),
}
}
/// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
pub async fn next(&self) -> anyhow::Result<VirtualPort> {
let mut inner = self.inner.write().await;
let port = inner
.queue
.pop_front()
.context("TCP virtual port pool is exhausted")?;
Ok(VirtualPort::new(port, PortProtocol::Tcp))
}
/// Releases a port back into the pool.
pub async fn release(&self, port: VirtualPort) {
let mut inner = self.inner.write().await;
inner.queue.push_back(port.num());
}
}
/// Non thread-safe inner logic for TCP port pool.
#[derive(Debug, Default)]
struct TcpPortPoolInner {
/// Remaining ports in the pool.
queue: VecDeque<u16>,
}
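A quick lifecycle sketch for the pool above (hypothetical usage):
// Sketch: take a shuffled virtual port for a connection, return it afterwards.
let pool = TcpPortPool::new();
let port = pool.next().await.expect("fresh pool has free ports");
// ... proxy the connection under `port` ...
pool.release(port).await;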

src/tunnel/udp.rs (new file, 257 lines)
@@ -0,0 +1,257 @@
use std::collections::{HashMap, VecDeque};
use std::net::{IpAddr, SocketAddr};
use std::ops::Range;
use std::sync::Arc;
use std::time::Instant;
use anyhow::Context;
use bytes::Bytes;
use priority_queue::double_priority_queue::DoublePriorityQueue;
use rand::seq::SliceRandom;
use rand::thread_rng;
use tokio::net::UdpSocket;
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::{Bus, Event};
use crate::virtual_iface::VirtualPort;
const MAX_PACKET: usize = 65536;
const MIN_PORT: u16 = 1000;
const MAX_PORT: u16 = 60999;
const PORT_RANGE: Range<u16> = MIN_PORT..MAX_PORT;
/// How long to keep the UDP peer address assigned to its virtual port, in seconds.
/// TODO: Make this configurable by the CLI
const UDP_TIMEOUT_SECONDS: u64 = 60;
/// To prevent port-flooding, we set a limit on the amount of open ports per IP address.
/// TODO: Make this configurable by the CLI
const PORTS_PER_IP: usize = 100;
/// Starts the server that listens on UDP datagrams.
pub async fn udp_proxy_server(
port_forward: PortForwardConfig,
port_pool: UdpPortPool,
bus: Bus,
) -> anyhow::Result<()> {
let mut endpoint = bus.new_endpoint();
let socket = UdpSocket::bind(port_forward.source)
.await
.context("Failed to bind on UDP proxy address")?;
let mut buffer = [0u8; MAX_PACKET];
loop {
tokio::select! {
to_send_result = next_udp_datagram(&socket, &mut buffer, port_pool.clone()) => {
match to_send_result {
Ok(Some((port, data))) => {
endpoint.send(Event::LocalData(port_forward, port, data));
}
Ok(None) => {
continue;
}
Err(e) => {
error!(
"Failed to read from client UDP socket: {:?}",
e
);
break;
}
}
}
event = endpoint.recv() => {
if let Event::RemoteData(virtual_port, data) = event {
if let Some(peer) = port_pool.get_peer_addr(virtual_port).await {
// Have remote data to send to the local client
if let Err(e) = socket.writable().await {
error!("[{}] Failed to check if writable: {:?}", virtual_port, e);
}
let expected = data.len();
let mut sent = 0;
loop {
if sent >= expected {
break;
}
match socket.send_to(&data[sent..expected], peer).await {
Ok(written) => {
debug!("[{}] Sent {} (expected {}) bytes to local client", virtual_port, written, expected);
sent += written;
if sent < expected {
debug!("[{}] Will try to resend remaining {} bytes to local client", virtual_port, (expected - written));
}
},
Err(e) => {
error!("[{}] Failed to send {} bytes to local client: {:?}", virtual_port, expected, e);
break;
}
}
}
port_pool.update_last_transmit(virtual_port).await;
}
}
}
}
}
Ok(())
}
async fn next_udp_datagram(
socket: &UdpSocket,
buffer: &mut [u8],
port_pool: UdpPortPool,
) -> anyhow::Result<Option<(VirtualPort, Bytes)>> {
let (size, peer_addr) = socket
.recv_from(buffer)
.await
.context("Failed to accept incoming UDP datagram")?;
// Assign a 'virtual port': this is a unique port number used to route IP packets
// received from the WireGuard tunnel. It is the port number that the virtual client will
// listen on.
let port = match port_pool.next(peer_addr).await {
Ok(port) => port,
Err(e) => {
error!(
"Failed to assign virtual port number for UDP datagram from [{}]: {:?}",
peer_addr, e
);
return Ok(None);
}
};
debug!(
"[{}] Received datagram of {} bytes from {}",
port, size, peer_addr
);
port_pool.update_last_transmit(port).await;
let data = buffer[..size].to_vec();
Ok(Some((port, data.into())))
}
/// A pool of virtual ports available for TCP connections.
#[derive(Clone)]
pub struct UdpPortPool {
inner: Arc<tokio::sync::RwLock<UdpPortPoolInner>>,
}
impl Default for UdpPortPool {
fn default() -> Self {
Self::new()
}
}
impl UdpPortPool {
/// Initializes a new pool of virtual ports.
pub fn new() -> Self {
let mut inner = UdpPortPoolInner::default();
let mut ports: Vec<u16> = PORT_RANGE.collect();
ports.shuffle(&mut thread_rng());
ports
.into_iter()
.for_each(|p| inner.queue.push_back(p) as ());
Self {
inner: Arc::new(tokio::sync::RwLock::new(inner)),
}
}
/// Requests a free port from the pool. An error is returned if none is available (exhausted max capacity).
pub async fn next(&self, peer_addr: SocketAddr) -> anyhow::Result<VirtualPort> {
// A port that can be reused, if any. This is determined outside the block below because the read lock cannot be upgraded to a write lock.
let mut port_reuse: Option<u16> = None;
{
let inner = self.inner.read().await;
if let Some(port) = inner.port_by_peer_addr.get(&peer_addr) {
return Ok(VirtualPort::new(*port, PortProtocol::Udp));
}
// Count how many ports are being used by the peer IP
let peer_ip = peer_addr.ip();
let peer_port_count = inner
.peer_port_usage
.get(&peer_ip)
.map(|v| v.len())
.unwrap_or_default();
if peer_port_count >= PORTS_PER_IP {
// Return least recently used port in this IP's pool
port_reuse = Some(
*(inner
.peer_port_usage
.get(&peer_ip)
.unwrap()
.peek_min()
.unwrap()
.0),
);
warn!(
"Peer [{}] is re-using active virtual port {} due to self-exhaustion.",
peer_addr,
port_reuse.unwrap()
);
}
}
let mut inner = self.inner.write().await;
let port = port_reuse
.or_else(|| inner.queue.pop_front())
.or_else(|| {
// If there is no port to reuse, and the port pool is exhausted, take the last recently used port overall,
// as long as the last transmission exceeds the deadline
let last: (&u16, &Instant) = inner.port_usage.peek_min().unwrap();
if Instant::now().duration_since(*last.1).as_secs() > UDP_TIMEOUT_SECONDS {
warn!(
"Peer [{}] is re-using inactive virtual port {} due to global exhaustion.",
peer_addr, last.0
);
Some(*last.0)
} else {
None
}
})
.context("Virtual port pool is exhausted")?;
inner.port_by_peer_addr.insert(peer_addr, port);
inner.peer_addr_by_port.insert(port, peer_addr);
Ok(VirtualPort::new(port, PortProtocol::Udp))
}
/// Notify that the given virtual port has received or transmitted a UDP datagram.
pub async fn update_last_transmit(&self, port: VirtualPort) {
let mut inner = self.inner.write().await;
if let Some(peer) = inner.peer_addr_by_port.get(&port.num()).copied() {
let pq: &mut DoublePriorityQueue<u16, Instant> = inner
.peer_port_usage
.entry(peer.ip())
.or_insert_with(Default::default);
pq.push(port.num(), Instant::now());
}
let pq: &mut DoublePriorityQueue<u16, Instant> = &mut inner.port_usage;
pq.push(port.num(), Instant::now());
}
pub async fn get_peer_addr(&self, port: VirtualPort) -> Option<SocketAddr> {
let inner = self.inner.read().await;
inner.peer_addr_by_port.get(&port.num()).copied()
}
}
/// Non thread-safe inner logic for UDP port pool.
#[derive(Debug, Default)]
struct UdpPortPoolInner {
/// Remaining ports in the pool.
queue: VecDeque<u16>,
/// The port assigned by peer IP/port. This is used to lookup an existing virtual port
/// for an incoming UDP datagram.
port_by_peer_addr: HashMap<SocketAddr, u16>,
/// The socket address assigned to a peer IP/port. This is used to send a UDP datagram to
/// the real peer address, given the virtual port.
peer_addr_by_port: HashMap<u16, SocketAddr>,
/// Keeps an ordered map of the most recently used virtual ports by a peer (client) IP.
peer_port_usage: HashMap<IpAddr, DoublePriorityQueue<u16, Instant>>,
/// Keeps an ordered map of the most recently used virtual ports in general.
port_usage: DoublePriorityQueue<u16, Instant>,
}
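A sketch of the UDP pool's peer-address mapping (hypothetical; 203.0.113.1 is a documentation address):
// Sketch: repeated datagrams from one peer reuse the same virtual port.
let pool = UdpPortPool::new();
let peer: SocketAddr = "203.0.113.1:5000".parse().unwrap();
let vp = pool.next(peer).await.expect("fresh pool has free ports");
assert_eq!(pool.next(peer).await.unwrap(), vp);
pool.update_last_transmit(vp).await; // refresh the LRU so the port is not reclaimed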

src/virtual_device.rs
@@ -1,102 +1,136 @@
use crate::wg::WireGuardTunnel;
use anyhow::Context;
use smoltcp::phy::{Device, DeviceCapabilities, Medium};
use smoltcp::time::Instant;
use std::sync::Arc;
use crate::config::PortProtocol;
use crate::events::{BusSender, Event};
use crate::Bus;
use bytes::{BufMut, Bytes, BytesMut};
use smoltcp::{
phy::{DeviceCapabilities, Medium},
time::Instant,
};
use std::{
collections::VecDeque,
sync::{Arc, Mutex},
};
/// A virtual device that processes IP packets. IP packets received from the WireGuard endpoint
/// are made available to this device using a channel receiver. IP packets sent from this device
/// are asynchronously sent out to the WireGuard tunnel.
/// A virtual device that processes IP packets through smoltcp and WireGuard.
pub struct VirtualIpDevice {
/// Tunnel to send IP packets to.
wg: Arc<WireGuardTunnel>,
/// Max transmission unit (bytes)
max_transmission_unit: usize,
/// Channel receiver for received IP packets.
ip_dispatch_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
bus_sender: BusSender,
/// Local queue for packets received from the bus that need to go through the smoltcp interface.
process_queue: Arc<Mutex<VecDeque<Bytes>>>,
}
impl VirtualIpDevice {
pub fn new(virtual_port: u16, wg: Arc<WireGuardTunnel>) -> anyhow::Result<Self> {
let ip_dispatch_rx = wg
.register_virtual_interface(virtual_port)
.with_context(|| "Failed to register IP dispatch for virtual interface")?;
/// Initializes a new virtual IP device.
pub fn new(protocol: PortProtocol, bus: Bus, max_transmission_unit: usize) -> Self {
let mut bus_endpoint = bus.new_endpoint();
let bus_sender = bus_endpoint.sender();
let process_queue = Arc::new(Mutex::new(VecDeque::new()));
Ok(Self { wg, ip_dispatch_rx })
{
let process_queue = process_queue.clone();
tokio::spawn(async move {
loop {
match bus_endpoint.recv().await {
Event::InboundInternetPacket(ip_proto, data) if ip_proto == protocol => {
let mut queue = process_queue
.lock()
.expect("Failed to acquire process queue lock");
queue.push_back(data);
bus_endpoint.send(Event::VirtualDeviceFed(ip_proto));
}
_ => {}
}
}
});
}
pub async fn new_sink(wg: Arc<WireGuardTunnel>) -> anyhow::Result<Self> {
let ip_dispatch_rx = wg
.register_sink_interface()
.await
.with_context(|| "Failed to register IP dispatch for sink virtual interface")?;
Ok(Self { wg, ip_dispatch_rx })
Self {
bus_sender,
process_queue,
max_transmission_unit,
}
}
}
impl<'a> Device<'a> for VirtualIpDevice {
type RxToken = RxToken;
type TxToken = TxToken;
impl smoltcp::phy::Device for VirtualIpDevice {
type RxToken<'a>
= RxToken
where
Self: 'a;
type TxToken<'a>
= TxToken
where
Self: 'a;
fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
match self.ip_dispatch_rx.try_recv() {
Ok(buffer) => Some((
Self::RxToken { buffer },
fn receive(&mut self, _timestamp: Instant) -> Option<(Self::RxToken<'_>, Self::TxToken<'_>)> {
let next = {
let mut queue = self
.process_queue
.lock()
.expect("Failed to acquire process queue lock");
queue.pop_front()
};
match next {
Some(buffer) => Some((
Self::RxToken {
buffer: {
let mut buf = BytesMut::new();
buf.put(buffer);
buf
},
},
Self::TxToken {
wg: self.wg.clone(),
sender: self.bus_sender.clone(),
},
)),
Err(_) => None,
None => None,
}
}
fn transmit(&'a mut self) -> Option<Self::TxToken> {
fn transmit(&mut self, _timestamp: Instant) -> Option<Self::TxToken<'_>> {
Some(TxToken {
wg: self.wg.clone(),
sender: self.bus_sender.clone(),
})
}
fn capabilities(&self) -> DeviceCapabilities {
let mut cap = DeviceCapabilities::default();
cap.medium = Medium::Ip;
cap.max_transmission_unit = 1420;
cap.max_transmission_unit = self.max_transmission_unit;
cap
}
}
#[doc(hidden)]
pub struct RxToken {
buffer: Vec<u8>,
buffer: BytesMut,
}
impl smoltcp::phy::RxToken for RxToken {
fn consume<R, F>(mut self, _timestamp: Instant, f: F) -> smoltcp::Result<R>
fn consume<R, F>(self, f: F) -> R
where
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
F: FnOnce(&[u8]) -> R,
{
f(&mut self.buffer)
f(&self.buffer)
}
}
#[doc(hidden)]
pub struct TxToken {
wg: Arc<WireGuardTunnel>,
sender: BusSender,
}
impl smoltcp::phy::TxToken for TxToken {
fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> smoltcp::Result<R>
fn consume<R, F>(self, len: usize, f: F) -> R
where
F: FnOnce(&mut [u8]) -> smoltcp::Result<R>,
F: FnOnce(&mut [u8]) -> R,
{
let mut buffer = Vec::new();
buffer.resize(len, 0);
let mut buffer = vec![0; len];
let result = f(&mut buffer);
tokio::spawn(async move {
match self.wg.send_ip_packet(&buffer).await {
Ok(_) => {}
Err(e) => {
error!("Failed to send IP packet to WireGuard endpoint: {:?}", e);
}
}
});
self.sender
.send(Event::OutboundInternetPacket(buffer.into()));
result
}
}
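As wired in src/lib.rs above, one device per protocol is created and handed to a virtual interface (sketch; `bus`, `config`, `port_forwards`, and `source_peer_ip` assumed in scope):
// Sketch: bus-fed device, polled by the TCP virtual interface.
let device = VirtualIpDevice::new(PortProtocol::Tcp, bus.clone(), config.max_transmission_unit);
let iface = TcpVirtualInterface::new(port_forwards.clone(), bus.clone(), source_peer_ip);
tokio::spawn(async move { iface.poll_loop(device).await });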

src/virtual_iface/mod.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
pub mod tcp;
pub mod udp;
use crate::config::PortProtocol;
use crate::VirtualIpDevice;
use async_trait::async_trait;
use std::fmt::{Display, Formatter};
#[async_trait]
pub trait VirtualInterfacePoll {
/// Initializes the virtual interface and processes incoming data to be dispatched
/// to the WireGuard tunnel and to the real client.
async fn poll_loop(mut self, device: VirtualIpDevice) -> anyhow::Result<()>;
}
/// Virtual port.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
pub struct VirtualPort(u16, PortProtocol);
impl VirtualPort {
/// Create a new `VirtualPort` instance, with the given port number and associated protocol.
pub fn new(port: u16, proto: PortProtocol) -> Self {
VirtualPort(port, proto)
}
/// The port number
pub fn num(&self) -> u16 {
self.0
}
/// The protocol of this port.
pub fn proto(&self) -> PortProtocol {
self.1
}
}
impl From<VirtualPort> for u16 {
fn from(port: VirtualPort) -> Self {
port.num()
}
}
impl From<&VirtualPort> for u16 {
fn from(port: &VirtualPort) -> Self {
port.num()
}
}
impl From<VirtualPort> for PortProtocol {
fn from(port: VirtualPort) -> Self {
port.proto()
}
}
impl From<&VirtualPort> for PortProtocol {
fn from(port: &VirtualPort) -> Self {
port.proto()
}
}
impl Display for VirtualPort {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "[{}:{}]", self.num(), self.proto())
}
}
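For example, per the impls above:
// Example: Display renders the port with its protocol tag.
let vp = VirtualPort::new(8080, PortProtocol::Tcp);
assert_eq!(vp.to_string(), "[8080:TCP]");
assert_eq!(u16::from(vp), 8080);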

src/virtual_iface/tcp.rs (new file, 257 lines)
@@ -0,0 +1,257 @@
use crate::config::{PortForwardConfig, PortProtocol};
use crate::events::Event;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::Bus;
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use smoltcp::iface::PollResult;
use smoltcp::{
iface::{Config, Interface, SocketHandle, SocketSet},
socket::tcp,
time::Instant,
wire::{HardwareAddress, IpAddress, IpCidr, IpVersion},
};
use std::{
collections::{HashMap, HashSet, VecDeque},
net::IpAddr,
time::Duration,
};
const MAX_PACKET: usize = 65536;
/// A virtual interface for proxying Layer 7 data to Layer 3 packets, and vice-versa.
pub struct TcpVirtualInterface {
source_peer_ip: IpAddr,
port_forwards: Vec<PortForwardConfig>,
bus: Bus,
sockets: SocketSet<'static>,
}
impl TcpVirtualInterface {
/// Initialize the parameters for a new virtual interface.
/// Use the `poll_loop()` future to start the virtual interface poll loop.
pub fn new(port_forwards: Vec<PortForwardConfig>, bus: Bus, source_peer_ip: IpAddr) -> Self {
Self {
port_forwards: port_forwards
.into_iter()
.filter(|f| matches!(f.protocol, PortProtocol::Tcp))
.collect(),
source_peer_ip,
bus,
sockets: SocketSet::new([]),
}
}
fn new_server_socket(port_forward: PortForwardConfig) -> anyhow::Result<tcp::Socket<'static>> {
static mut TCP_SERVER_RX_DATA: [u8; 0] = [];
static mut TCP_SERVER_TX_DATA: [u8; 0] = [];
let tcp_rx_buffer = tcp::SocketBuffer::new(unsafe { &mut TCP_SERVER_RX_DATA[..] });
let tcp_tx_buffer = tcp::SocketBuffer::new(unsafe { &mut TCP_SERVER_TX_DATA[..] });
let mut socket = tcp::Socket::new(tcp_rx_buffer, tcp_tx_buffer);
socket
.listen((
IpAddress::from(port_forward.destination.ip()),
port_forward.destination.port(),
))
.context("Virtual server socket failed to listen")?;
Ok(socket)
}
fn new_client_socket() -> anyhow::Result<tcp::Socket<'static>> {
let rx_data = vec![0u8; MAX_PACKET];
let tx_data = vec![0u8; MAX_PACKET];
let tcp_rx_buffer = tcp::SocketBuffer::new(rx_data);
let tcp_tx_buffer = tcp::SocketBuffer::new(tx_data);
let socket = tcp::Socket::new(tcp_rx_buffer, tcp_tx_buffer);
Ok(socket)
}
fn addresses(&self) -> Vec<IpCidr> {
let mut addresses = HashSet::new();
addresses.insert(IpAddress::from(self.source_peer_ip));
for config in self.port_forwards.iter() {
addresses.insert(IpAddress::from(config.destination.ip()));
}
addresses
.into_iter()
.map(|addr| IpCidr::new(addr, addr_length(&addr)))
.collect()
}
}
#[async_trait]
impl VirtualInterfacePoll for TcpVirtualInterface {
async fn poll_loop(mut self, mut device: VirtualIpDevice) -> anyhow::Result<()> {
// Create CIDR block for source peer IP + each port forward IP
let addresses = self.addresses();
let config = Config::new(HardwareAddress::Ip);
// Create virtual interface (contains smoltcp state machine)
let mut iface = Interface::new(config, &mut device, Instant::now());
iface.update_ip_addrs(|ip_addrs| {
addresses.into_iter().for_each(|addr| {
ip_addrs
.push(addr)
.expect("maximum number of IPs in TCP interface reached");
});
});
// Create virtual server for each port forward
for port_forward in self.port_forwards.iter() {
let server_socket = TcpVirtualInterface::new_server_socket(*port_forward)?;
self.sockets.add(server_socket);
}
// The next time to poll the interface. Can be None for instant poll.
let mut next_poll: Option<tokio::time::Instant> = None;
// Bus endpoint to read events
let mut endpoint = self.bus.new_endpoint();
// Maps virtual port to its client socket handle
let mut port_client_handle_map: HashMap<VirtualPort, SocketHandle> = HashMap::new();
// Data packets to send from a virtual client
let mut send_queue: HashMap<VirtualPort, VecDeque<Bytes>> = HashMap::new();
loop {
tokio::select! {
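// Wake-up policy: no deadline and no clients => park until a bus event
// arrives (the branch below); clients but no deadline => poll immediately;
// otherwise sleep until the deadline suggested by smoltcp's poll_delay().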
_ = match (next_poll, port_client_handle_map.len()) {
(None, 0) => tokio::time::sleep(Duration::MAX),
(None, _) => tokio::time::sleep(Duration::ZERO),
(Some(until), _) => tokio::time::sleep_until(until),
} => {
let loop_start = smoltcp::time::Instant::now();
// Find closed sockets
port_client_handle_map.retain(|virtual_port, client_handle| {
let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
if client_socket.state() == tcp::State::Closed {
endpoint.send(Event::ClientConnectionDropped(*virtual_port));
send_queue.remove(virtual_port);
self.sockets.remove(*client_handle);
false
} else {
// Not closed, retain
true
}
});
if iface.poll(loop_start, &mut device, &mut self.sockets) == PollResult::SocketStateChanged {
log::trace!("TCP virtual interface polled some packets to be processed");
}
for (virtual_port, client_handle) in port_client_handle_map.iter() {
let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
if client_socket.can_send() {
if let Some(send_queue) = send_queue.get_mut(virtual_port) {
let to_transfer = send_queue.pop_front();
if let Some(to_transfer_slice) = to_transfer.as_deref() {
let total = to_transfer_slice.len();
match client_socket.send_slice(to_transfer_slice) {
Ok(sent) => {
if sent < total {
// Sometimes only a subset is sent, so the rest needs to be sent on the next poll
let tx_extra = Vec::from(&to_transfer_slice[sent..total]);
send_queue.push_front(tx_extra.into());
}
}
Err(e) => {
error!(
"Failed to send slice via virtual client socket: {:?}", e
);
}
}
} else if client_socket.state() == tcp::State::CloseWait {
client_socket.close();
}
}
}
if client_socket.can_recv() {
match client_socket.recv(|buffer| (buffer.len(), Bytes::from(buffer.to_vec()))) {
Ok(data) => {
debug!("[{}] Received {} bytes from virtual server", virtual_port, data.len());
if !data.is_empty() {
endpoint.send(Event::RemoteData(*virtual_port, data));
}
}
Err(e) => {
error!(
"Failed to read from virtual client socket: {:?}", e
);
}
}
}
}
// The virtual interface determines the next time to poll (this is to reduce unnecessary polls)
next_poll = match iface.poll_delay(loop_start, &self.sockets) {
Some(smoltcp::time::Duration::ZERO) => None,
Some(delay) => {
trace!("TCP Virtual interface delayed next poll by {}", delay);
Some(tokio::time::Instant::now() + Duration::from_millis(delay.total_millis()))
},
None => None,
};
}
event = endpoint.recv() => {
match event {
Event::ClientConnectionInitiated(port_forward, virtual_port) => {
let client_socket = TcpVirtualInterface::new_client_socket()?;
let client_handle = self.sockets.add(client_socket);
// Add handle to map
port_client_handle_map.insert(virtual_port, client_handle);
send_queue.insert(virtual_port, VecDeque::new());
let client_socket = self.sockets.get_mut::<tcp::Socket>(client_handle);
let context = iface.context();
client_socket
.connect(
context,
(
IpAddress::from(port_forward.destination.ip()),
port_forward.destination.port(),
),
(IpAddress::from(self.source_peer_ip), virtual_port.num()),
)
.context("Virtual server socket failed to listen")?;
next_poll = None;
}
Event::ClientConnectionDropped(virtual_port) => {
if let Some(client_handle) = port_client_handle_map.get(&virtual_port) {
let client_socket = self.sockets.get_mut::<tcp::Socket>(*client_handle);
client_socket.close();
next_poll = None;
}
}
Event::LocalData(_, virtual_port, data) if send_queue.contains_key(&virtual_port) => {
if let Some(send_queue) = send_queue.get_mut(&virtual_port) {
send_queue.push_back(data);
next_poll = None;
}
}
Event::VirtualDeviceFed(PortProtocol::Tcp) => {
next_poll = None;
}
_ => {}
}
}
}
}
}
}
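// Host routes: a /32 (IPv4) or /128 (IPv6) prefix covers exactly one address.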
const fn addr_length(addr: &IpAddress) -> u8 {
match addr.version() {
IpVersion::Ipv4 => 32,
IpVersion::Ipv6 => 128,
}
}
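The three sleep arms in the `tokio::select!` above are the crux of the poll loop: they decide when the interface wakes up without busy-waiting. A self-contained sketch of the same pattern (toy values and names, not onetun code; assumes a tokio dependency with the `rt`, `macros`, and `time` features):

use std::time::Duration;

#[tokio::main]
async fn main() {
    // None = poll as soon as possible; Some(t) = poll at deadline t.
    let mut next_poll: Option<tokio::time::Instant> = None;
    let active_clients: usize = 1; // pretend one connection is open
    for _ in 0..3 {
        tokio::select! {
            _ = match (next_poll, active_clients) {
                (None, 0) => tokio::time::sleep(Duration::MAX),      // fully idle: park until an event
                (None, _) => tokio::time::sleep(Duration::ZERO),     // work pending: poll immediately
                (Some(until), _) => tokio::time::sleep_until(until), // poll at the deadline
            } => {
                println!("polled");
                // Pretend smoltcp's poll_delay() asked for another poll in 10 ms.
                next_poll = Some(tokio::time::Instant::now() + Duration::from_millis(10));
            }
        }
    }
}

In the real loop, the second `select!` branch (the bus endpoint) is what rescues the `Duration::MAX` arm: any event resets `next_poll` to `None`, forcing an immediate poll.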

src/virtual_iface/udp.rs Normal file

@@ -0,0 +1,227 @@
use crate::config::PortForwardConfig;
use crate::events::Event;
use crate::virtual_device::VirtualIpDevice;
use crate::virtual_iface::{VirtualInterfacePoll, VirtualPort};
use crate::{Bus, PortProtocol};
use anyhow::Context;
use async_trait::async_trait;
use bytes::Bytes;
use smoltcp::iface::PollResult;
use smoltcp::{
iface::{Config, Interface, SocketHandle, SocketSet},
socket::udp::{self, UdpMetadata},
time::Instant,
wire::{HardwareAddress, IpAddress, IpCidr, IpVersion},
};
use std::{
collections::{HashMap, HashSet, VecDeque},
net::IpAddr,
time::Duration,
};
const MAX_PACKET: usize = 65536;
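/// A virtual interface for proxying Layer 7 data to Layer 3 packets, and vice-versa.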
pub struct UdpVirtualInterface {
source_peer_ip: IpAddr,
port_forwards: Vec<PortForwardConfig>,
bus: Bus,
sockets: SocketSet<'static>,
}
impl UdpVirtualInterface {
/// Initialize the parameters for a new virtual interface.
/// Use the `poll_loop()` future to start the virtual interface poll loop.
pub fn new(port_forwards: Vec<PortForwardConfig>, bus: Bus, source_peer_ip: IpAddr) -> Self {
Self {
port_forwards: port_forwards
.into_iter()
.filter(|f| matches!(f.protocol, PortProtocol::Udp))
.collect(),
source_peer_ip,
bus,
sockets: SocketSet::new([]),
}
}
fn new_server_socket(port_forward: PortForwardConfig) -> anyhow::Result<udp::Socket<'static>> {
// Zero-capacity buffers: the bound server socket never carries payload data.
// Owned empty Vecs replace the original aliased `static mut` arrays, whose
// `&mut` borrows are unsound when several server sockets are created and are
// rejected by newer Rust toolchains (`static_mut_refs`).
let udp_rx_buffer = udp::PacketBuffer::new(Vec::<udp::PacketMetadata>::new(), Vec::<u8>::new());
let udp_tx_buffer = udp::PacketBuffer::new(Vec::<udp::PacketMetadata>::new(), Vec::<u8>::new());
let mut socket = udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
socket
.bind((
IpAddress::from(port_forward.destination.ip()),
port_forward.destination.port(),
))
.context("UDP virtual server socket failed to bind")?;
Ok(socket)
}
fn new_client_socket(
source_peer_ip: IpAddr,
client_port: VirtualPort,
) -> anyhow::Result<udp::Socket<'static>> {
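// Each PacketMetadata slot tracks one queued datagram: up to 10 datagrams
// per direction here, sharing MAX_PACKET bytes of payload storage.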
let rx_meta = vec![udp::PacketMetadata::EMPTY; 10];
let tx_meta = vec![udp::PacketMetadata::EMPTY; 10];
let rx_data = vec![0u8; MAX_PACKET];
let tx_data = vec![0u8; MAX_PACKET];
let udp_rx_buffer = udp::PacketBuffer::new(rx_meta, rx_data);
let udp_tx_buffer = udp::PacketBuffer::new(tx_meta, tx_data);
let mut socket = udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
socket
.bind((IpAddress::from(source_peer_ip), client_port.num()))
.context("UDP virtual client failed to bind")?;
Ok(socket)
}
fn addresses(&self) -> Vec<IpCidr> {
let mut addresses = HashSet::new();
addresses.insert(IpAddress::from(self.source_peer_ip));
for config in self.port_forwards.iter() {
addresses.insert(IpAddress::from(config.destination.ip()));
}
addresses
.into_iter()
.map(|addr| IpCidr::new(addr, addr_length(&addr)))
.collect()
}
}
#[async_trait]
impl VirtualInterfacePoll for UdpVirtualInterface {
async fn poll_loop(mut self, mut device: VirtualIpDevice) -> anyhow::Result<()> {
// Create CIDR block for source peer IP + each port forward IP
let addresses = self.addresses();
let config = Config::new(HardwareAddress::Ip);
// Create virtual interface (contains smoltcp state machine)
let mut iface = Interface::new(config, &mut device, Instant::now());
iface.update_ip_addrs(|ip_addrs| {
addresses.into_iter().for_each(|addr| {
ip_addrs
.push(addr)
.expect("maximum number of IPs in UDP interface reached");
});
});
// Create virtual server for each port forward
for port_forward in self.port_forwards.iter() {
let server_socket = UdpVirtualInterface::new_server_socket(*port_forward)?;
self.sockets.add(server_socket);
}
// The next time to poll the interface. Can be None for instant poll.
let mut next_poll: Option<tokio::time::Instant> = None;
// Bus endpoint to read events
let mut endpoint = self.bus.new_endpoint();
// Maps virtual port to its client socket handle
let mut port_client_handle_map: HashMap<VirtualPort, SocketHandle> = HashMap::new();
// Data packets to send from a virtual client
let mut send_queue: HashMap<VirtualPort, VecDeque<(PortForwardConfig, Bytes)>> =
HashMap::new();
loop {
tokio::select! {
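// Same wake-up policy as the TCP virtual interface (see tcp.rs).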
_ = match (next_poll, port_client_handle_map.len()) {
(None, 0) => tokio::time::sleep(Duration::MAX),
(None, _) => tokio::time::sleep(Duration::ZERO),
(Some(until), _) => tokio::time::sleep_until(until),
} => {
let loop_start = smoltcp::time::Instant::now();
if iface.poll(loop_start, &mut device, &mut self.sockets) == PollResult::SocketStateChanged {
log::trace!("UDP virtual interface polled some packets to be processed");
}
for (virtual_port, client_handle) in port_client_handle_map.iter() {
let client_socket = self.sockets.get_mut::<udp::Socket>(*client_handle);
if client_socket.can_send() {
if let Some(send_queue) = send_queue.get_mut(virtual_port) {
let to_transfer = send_queue.pop_front();
if let Some((port_forward, data)) = to_transfer {
client_socket
.send_slice(
&data,
UdpMetadata::from(port_forward.destination),
)
.unwrap_or_else(|e| {
error!(
"[{}] Failed to send data to virtual server: {:?}",
virtual_port, e
);
});
}
}
}
if client_socket.can_recv() {
match client_socket.recv() {
Ok((data, _peer)) => {
if !data.is_empty() {
endpoint.send(Event::RemoteData(*virtual_port, data.to_vec().into()));
}
}
Err(e) => {
error!(
"Failed to read from virtual client socket: {:?}", e
);
}
}
}
}
// The virtual interface determines the next time to poll (this is to reduce unnecessary polls)
next_poll = match iface.poll_delay(loop_start, &self.sockets) {
Some(smoltcp::time::Duration::ZERO) => None,
Some(delay) => {
trace!("UDP Virtual interface delayed next poll by {}", delay);
Some(tokio::time::Instant::now() + Duration::from_millis(delay.total_millis()))
},
None => None,
};
}
event = endpoint.recv() => {
match event {
Event::LocalData(port_forward, virtual_port, data) => {
if let Some(send_queue) = send_queue.get_mut(&virtual_port) {
// Client socket already exists
send_queue.push_back((port_forward, data));
} else {
// Client socket does not exist
let client_socket = UdpVirtualInterface::new_client_socket(self.source_peer_ip, virtual_port)?;
let client_handle = self.sockets.add(client_socket);
// Add handle to map
port_client_handle_map.insert(virtual_port, client_handle);
send_queue.insert(virtual_port, VecDeque::from(vec![(port_forward, data)]));
}
next_poll = None;
}
Event::VirtualDeviceFed(PortProtocol::Udp) => {
next_poll = None;
}
_ => {}
}
}
}
}
}
}
const fn addr_length(addr: &IpAddress) -> u8 {
match addr.version() {
IpVersion::Ipv4 => 32,
IpVersion::Ipv6 => 128,
}
}
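The `Event::LocalData` arm above is the only place client sockets are created: the first datagram for a virtual port allocates the socket and its send queue, and every later datagram just appends. The same lazy-creation shape as a standalone toy (std-only; all names are illustrative, not onetun code):

use std::collections::{HashMap, VecDeque};

// Returns true when this was the first datagram for `port` (i.e. the queue,
// and in onetun also the smoltcp client socket, had to be created).
fn enqueue(queues: &mut HashMap<u16, VecDeque<Vec<u8>>>, port: u16, datagram: Vec<u8>) -> bool {
    if let Some(queue) = queues.get_mut(&port) {
        queue.push_back(datagram); // "client socket" already exists
        false
    } else {
        queues.insert(port, VecDeque::from(vec![datagram]));
        true
    }
}

fn main() {
    let mut queues: HashMap<u16, VecDeque<Vec<u8>>> = HashMap::new();
    assert!(enqueue(&mut queues, 5353, b"query".to_vec()));
    assert!(!enqueue(&mut queues, 5353, b"retry".to_vec()));
    assert_eq!(queues[&5353].len(), 2);
}

Unlike TCP, there is no connect handshake to wait for, which is why the socket can be created inline in the event handler.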

src/wg.rs

@@ -1,54 +1,54 @@
-use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::net::{IpAddr, SocketAddr};
use std::time::Duration;
+use crate::Bus;
use anyhow::Context;
+use async_recursion::async_recursion;
+use boringtun::noise::errors::WireGuardError;
use boringtun::noise::{Tunn, TunnResult};
use log::Level;
-use smoltcp::wire::{IpProtocol, IpVersion, Ipv4Packet, Ipv6Packet, TcpPacket};
+use smoltcp::wire::{IpProtocol, IpVersion, Ipv4Packet, Ipv6Packet};
use tokio::net::UdpSocket;
-use tokio::sync::RwLock;
+use tokio::sync::Mutex;
-use crate::config::Config;
-use crate::MAX_PACKET;
+use crate::config::{Config, PortProtocol};
+use crate::events::Event;
/// The capacity of the channel for received IP packets.
-const DISPATCH_CAPACITY: usize = 1_000;
+pub const DISPATCH_CAPACITY: usize = 1_000;
+const MAX_PACKET: usize = 65536;
/// A WireGuard tunnel. Encapsulates and decapsulates IP packets
/// to be sent to and received from a remote UDP endpoint.
/// This tunnel supports at most 1 peer IP at a time, but supports simultaneous ports.
pub struct WireGuardTunnel {
-source_peer_ip: IpAddr,
+pub(crate) source_peer_ip: IpAddr,
/// `boringtun` peer/tunnel implementation, used for crypto & WG protocol.
-peer: Box<Tunn>,
+peer: Mutex<Box<Tunn>>,
/// The UDP socket for the public WireGuard endpoint to connect to.
udp: UdpSocket,
/// The address of the public WireGuard endpoint (UDP).
-endpoint: SocketAddr,
-/// Maps virtual ports to the corresponding IP packet dispatcher.
-virtual_port_ip_tx: lockfree::map::Map<u16, tokio::sync::mpsc::Sender<Vec<u8>>>,
-/// IP packet dispatcher for unroutable packets. `None` if not initialized.
-sink_ip_tx: RwLock<Option<tokio::sync::mpsc::Sender<Vec<u8>>>>,
+pub(crate) endpoint: SocketAddr,
+/// Event bus
+bus: Bus,
}
impl WireGuardTunnel {
/// Initialize a new WireGuard tunnel.
-pub async fn new(config: &Config) -> anyhow::Result<Self> {
+pub async fn new(config: &Config, bus: Bus) -> anyhow::Result<Self> {
let source_peer_ip = config.source_peer_ip;
-let peer = Self::create_tunnel(config)?;
-let udp = UdpSocket::bind("0.0.0.0:0")
-.await
-.with_context(|| "Failed to create UDP socket for WireGuard connection")?;
+let peer = Mutex::new(Box::new(Self::create_tunnel(config)?));
let endpoint = config.endpoint_addr;
-let virtual_port_ip_tx = lockfree::map::Map::new();
+let udp = UdpSocket::bind(config.endpoint_bind_addr)
+.await
+.context("Failed to create UDP socket for WireGuard connection")?;
Ok(Self {
source_peer_ip,
peer,
udp,
endpoint,
-virtual_port_ip_tx,
-sink_ip_tx: RwLock::new(None),
+bus,
})
}
@@ -56,12 +56,16 @@ impl WireGuardTunnel {
pub async fn send_ip_packet(&self, packet: &[u8]) -> anyhow::Result<()> {
trace_ip_packet("Sending IP packet", packet);
let mut send_buf = [0u8; MAX_PACKET];
-match self.peer.encapsulate(packet, &mut send_buf) {
+let encapsulate_result = {
+let mut peer = self.peer.lock().await;
+peer.encapsulate(packet, &mut send_buf)
+};
+match encapsulate_result {
TunnResult::WriteToNetwork(packet) => {
self.udp
.send_to(packet, self.endpoint)
.await
.with_context(|| "Failed to send encrypted IP packet to WireGuard endpoint.")?;
.context("Failed to send encrypted IP packet to WireGuard endpoint.")?;
debug!(
"Sent {} bytes to WireGuard endpoint (encrypted IP packet)",
packet.len()
@@ -83,36 +87,20 @@ impl WireGuardTunnel {
Ok(())
}
-/// Register a virtual interface (using its assigned virtual port) with the given IP packet `Sender`.
-pub fn register_virtual_interface(
-&self,
-virtual_port: u16,
-) -> anyhow::Result<tokio::sync::mpsc::Receiver<Vec<u8>>> {
-let existing = self.virtual_port_ip_tx.get(&virtual_port);
-if existing.is_some() {
-Err(anyhow::anyhow!("Cannot register virtual interface with virtual port {} because it is already registered", virtual_port))
-} else {
-let (sender, receiver) = tokio::sync::mpsc::channel(DISPATCH_CAPACITY);
-self.virtual_port_ip_tx.insert(virtual_port, sender);
-Ok(receiver)
+pub async fn produce_task(&self) -> ! {
+trace!("Starting WireGuard production task");
+let mut endpoint = self.bus.new_endpoint();
+loop {
+if let Event::OutboundInternetPacket(data) = endpoint.recv().await {
+match self.send_ip_packet(&data).await {
+Ok(_) => {}
+Err(e) => {
+error!("{:?}", e);
+}
+}
+}
}
-/// Register a virtual interface (using its assigned virtual port) with the given IP packet `Sender`.
-pub async fn register_sink_interface(
-&self,
-) -> anyhow::Result<tokio::sync::mpsc::Receiver<Vec<u8>>> {
-let (sender, receiver) = tokio::sync::mpsc::channel(DISPATCH_CAPACITY);
-let mut sink_ip_tx = self.sink_ip_tx.write().await;
-*sink_ip_tx = Some(sender);
-Ok(receiver)
-}
-/// Releases the virtual interface from IP dispatch.
-pub fn release_virtual_interface(&self, virtual_port: u16) {
-self.virtual_port_ip_tx.remove(&virtual_port);
-}
/// WireGuard Routine task. Handles Handshake, keep-alive, etc.
@@ -121,7 +109,14 @@ impl WireGuardTunnel {
loop {
let mut send_buf = [0u8; MAX_PACKET];
-match self.peer.update_timers(&mut send_buf) {
+let tun_result = { self.peer.lock().await.update_timers(&mut send_buf) };
+self.handle_routine_tun_result(tun_result).await;
+}
+}
+#[async_recursion]
+async fn handle_routine_tun_result<'a: 'async_recursion>(&self, result: TunnResult<'a>) -> () {
+match result {
TunnResult::WriteToNetwork(packet) => {
debug!(
"Sending routine packet of {} bytes to WireGuard endpoint",
@@ -137,6 +132,18 @@
}
};
}
+TunnResult::Err(WireGuardError::ConnectionExpired) => {
+warn!("WireGuard handshake has expired!");
+let mut buf = vec![0u8; MAX_PACKET];
+let result = self
+.peer
+.lock()
+.await
+.format_handshake_initiation(&mut buf[..], false);
+self.handle_routine_tun_result(result).await
+}
TunnResult::Err(e) => {
error!(
"Failed to prepare routine packet for WireGuard endpoint: {:?}",
@@ -150,14 +157,14 @@
other => {
warn!("Unexpected WireGuard routine task state: {:?}", other);
}
-}
-}
+};
}
/// WireGuard consumption task. Receives encrypted packets from the WireGuard endpoint,
/// decapsulates them, and dispatches newly received IP packets.
pub async fn consume_task(&self) -> ! {
trace!("Starting WireGuard consumption task");
+let endpoint = self.bus.new_endpoint();
loop {
let mut recv_buf = [0u8; MAX_PACKET];
@@ -174,7 +181,11 @@
};
let data = &recv_buf[..size];
-match self.peer.decapsulate(None, data, &mut send_buf) {
+let decapsulate_result = {
+let mut peer = self.peer.lock().await;
+peer.decapsulate(None, data, &mut send_buf)
+};
+match decapsulate_result {
TunnResult::WriteToNetwork(packet) => {
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
@@ -183,9 +194,10 @@
continue;
}
};
+let mut peer = self.peer.lock().await;
loop {
let mut send_buf = [0u8; MAX_PACKET];
-match self.peer.decapsulate(None, &[], &mut send_buf) {
+match peer.decapsulate(None, &[], &mut send_buf) {
TunnResult::WriteToNetwork(packet) => {
match self.udp.send_to(packet, self.endpoint).await {
Ok(_) => {}
@@ -210,38 +222,8 @@
// For debugging purposes: parse packet
trace_ip_packet("Received IP packet", packet);
-match self.route_ip_packet(packet) {
-RouteResult::Dispatch(port) => {
-let sender = self.virtual_port_ip_tx.get(&port);
-if let Some(sender_guard) = sender {
-let sender = sender_guard.val();
-match sender.send(packet.to_vec()).await {
-Ok(_) => {
-trace!(
-"Dispatched received IP packet to virtual port {}",
-port
-);
-}
-Err(e) => {
-error!(
-"Failed to dispatch received IP packet to virtual port {}: {}",
-port, e
-);
-}
-}
-} else {
-warn!("[{}] Race condition: failed to get virtual port sender after it was dispatched", port);
-}
-}
-RouteResult::Sink => {
-trace!("Sending unroutable IP packet received from WireGuard endpoint to sink interface");
-self.route_ip_sink(packet).await.unwrap_or_else(|e| {
-error!("Failed to send unroutable IP packet to sink: {:?}", e)
-});
-}
-RouteResult::Drop => {
-trace!("Dropped unroutable IP packet received from WireGuard endpoint");
-}
+if let Some(proto) = self.route_protocol(packet) {
+endpoint.send(Event::InboundInternetPacket(proto, packet.to_vec().into()));
}
}
_ => {}
@@ -249,78 +231,46 @@
}
}
-fn create_tunnel(config: &Config) -> anyhow::Result<Box<Tunn>> {
+fn create_tunnel(config: &Config) -> anyhow::Result<Tunn> {
+let private = config.private_key.as_ref().clone();
+let public = *config.endpoint_public_key.as_ref();
Tunn::new(
-config.private_key.clone(),
-config.endpoint_public_key.clone(),
-None,
+private,
+public,
+config.preshared_key,
config.keepalive_seconds,
0,
None,
)
.map_err(|s| anyhow::anyhow!("{}", s))
.with_context(|| "Failed to initialize boringtun Tunn")
.context("Failed to initialize boringtun Tunn")
}
-/// Makes a decision on the handling of an incoming IP packet.
-fn route_ip_packet(&self, packet: &[u8]) -> RouteResult {
+/// Determine the inner protocol of the incoming IP packet (TCP/UDP).
+fn route_protocol(&self, packet: &[u8]) -> Option<PortProtocol> {
match IpVersion::of_packet(packet) {
Ok(IpVersion::Ipv4) => Ipv4Packet::new_checked(&packet)
.ok()
// Only care if the packet is destined for this tunnel
-.filter(|packet| Ipv4Addr::from(packet.dst_addr()) == self.source_peer_ip)
-.map(|packet| match packet.protocol() {
-IpProtocol::Tcp => Some(self.route_tcp_segment(packet.payload())),
+.filter(|packet| packet.dst_addr() == self.source_peer_ip)
+.and_then(|packet| match packet.next_header() {
+IpProtocol::Tcp => Some(PortProtocol::Tcp),
+IpProtocol::Udp => Some(PortProtocol::Udp),
// Unrecognized protocol, so we cannot determine where to route
-_ => Some(RouteResult::Drop),
-})
-.flatten()
-.unwrap_or(RouteResult::Drop),
+_ => None,
+}),
Ok(IpVersion::Ipv6) => Ipv6Packet::new_checked(&packet)
.ok()
// Only care if the packet is destined for this tunnel
-.filter(|packet| Ipv6Addr::from(packet.dst_addr()) == self.source_peer_ip)
-.map(|packet| match packet.next_header() {
-IpProtocol::Tcp => Some(self.route_tcp_segment(packet.payload())),
+.filter(|packet| packet.dst_addr() == self.source_peer_ip)
+.and_then(|packet| match packet.next_header() {
+IpProtocol::Tcp => Some(PortProtocol::Tcp),
+IpProtocol::Udp => Some(PortProtocol::Udp),
// Unrecognized protocol, so we cannot determine where to route
-_ => Some(RouteResult::Drop),
-})
-.flatten()
-.unwrap_or(RouteResult::Drop),
-_ => RouteResult::Drop,
-}
-}
-/// Makes a decision on the handling of an incoming TCP segment.
-fn route_tcp_segment(&self, segment: &[u8]) -> RouteResult {
-TcpPacket::new_checked(segment)
-.ok()
-.map(|tcp| {
-if self.virtual_port_ip_tx.get(&tcp.dst_port()).is_some() {
-RouteResult::Dispatch(tcp.dst_port())
-} else if tcp.rst() {
-RouteResult::Drop
-} else {
-RouteResult::Sink
-}
-})
-.unwrap_or(RouteResult::Drop)
-}
-/// Route a packet to the IP sink interface.
-async fn route_ip_sink(&self, packet: &[u8]) -> anyhow::Result<()> {
-let ip_sink_tx = self.sink_ip_tx.read().await;
-if let Some(ip_sink_tx) = &*ip_sink_tx {
-ip_sink_tx
-.send(packet.to_vec())
-.await
-.with_context(|| "Failed to dispatch IP packet to sink interface")
-} else {
-warn!(
-"Could not dispatch unroutable IP packet to sink because interface is not active."
-);
-Ok(())
+_ => None,
+}),
+_ => None,
}
}
}
@@ -344,12 +294,3 @@ fn trace_ip_packet(message: &str, packet: &[u8]) {
}
}
}
-enum RouteResult {
-/// Dispatch the packet to the virtual port.
-Dispatch(u16),
-/// The packet is not routable, and should be sent to the sink interface.
-Sink,
-/// The packet is not routable, and can be safely ignored.
-Drop,
-}
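With `RouteResult` gone, inbound dispatch no longer happens in the tunnel: `route_protocol` only classifies a decrypted packet as TCP or UDP, and the matching virtual interface picks it up from the bus. A minimal demonstration of that classification using the same smoltcp wire API (the 20-byte IPv4 header is hand-assembled for the demo; not onetun code):

use smoltcp::wire::{IpProtocol, IpVersion, Ipv4Packet};

fn main() {
    let mut raw = [0u8; 20];
    raw[0] = 0x45; // version 4, IHL 5 (5 * 4 = 20-byte header)
    raw[3] = 20;   // total length: header only, no payload
    raw[9] = 6;    // protocol field: 6 = TCP (17 would be UDP)
    assert_eq!(IpVersion::of_packet(&raw).unwrap(), IpVersion::Ipv4);
    let packet = Ipv4Packet::new_checked(&raw[..]).unwrap();
    assert_eq!(packet.next_header(), IpProtocol::Tcp);
}

Note that `new_checked` validates lengths only, not the header checksum, which is all the classifier needs.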