diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..0bdcf83d --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,217 @@ +name: Release + +on: + push: + tags: + - 'v*' + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + name: Build ${{ matrix.target_name }} + runs-on: ubuntu-latest + strategy: + matrix: + include: + - target: x86_64-unknown-linux-gnu + target_name: amd64 + target_name_rpm: x86_64 + musl_target: x86_64-unknown-linux-musl + - target: i686-unknown-linux-gnu + target_name: i386 + target_name_rpm: i686 + musl_target: i686-unknown-linux-musl + - target: aarch64-unknown-linux-gnu + target_name: arm64 + target_name_rpm: aarch64 + musl_target: aarch64-unknown-linux-musl + - target: armv7-unknown-linux-gnueabihf + target_name: armhf + target_name_rpm: "" + musl_target: armv7-unknown-linux-musleabihf + - target: armv5te-unknown-linux-gnueabi + target_name: armel + target_name_rpm: "" + musl_target: armv5te-unknown-linux-musleabi + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: ${{ matrix.target }} + + - name: Install cross-compilation tools + run: | + cargo install cross + cargo install cargo-deb + cargo install cargo-generate-rpm + + - name: Install UPX + run: | + UPX_VERSION=$(grep -e '^upx_version =' Cargo.toml | sed -e 's/upx_version = "\(.*\)"/\1/') + wget https://github.com/upx/upx/releases/download/v${UPX_VERSION}/upx-${UPX_VERSION}-amd64_linux.tar.xz + tar -xJf upx-${UPX_VERSION}-amd64_linux.tar.xz + sudo mv upx-${UPX_VERSION}-amd64_linux/upx /usr/local/bin/ + + - name: Generate manpage + run: | + sudo apt-get install -y asciidoctor + mkdir -p target + asciidoctor -b manpage -o target/vpncloud.1 vpncloud.adoc + gzip -f target/vpncloud.1 + + - name: Get version + id: version + run: | + VERSION=$(grep -e '^version =' Cargo.toml | sed -e 's/version = "\(.*\)"/\1/') + echo 
"version=$VERSION" >> $GITHUB_OUTPUT + + - name: Build packages + run: | + VERSION=${{ steps.version.outputs.version }} + TARGET=${{ matrix.target }} + TARGET_NAME=${{ matrix.target_name }} + TARGET_DIR=target/$TARGET_NAME + MUSL_TARGET=${{ matrix.musl_target }} + MUSL_DIR=target/${TARGET_NAME}-musl + + # Create dist directory + mkdir -p dist + + # Build standard package + echo "Compiling for $TARGET_NAME" + cross build --release --target $TARGET --target-dir $TARGET_DIR + mkdir -p target/$TARGET/release + cp $TARGET_DIR/$TARGET/release/vpncloud target/$TARGET/release/ + + # Build deb package + echo "Building deb package" + cargo deb --no-build --no-strip --target $TARGET + mv target/$TARGET/debian/vpncloud_${VERSION}-1_$TARGET_NAME.deb dist/vpncloud_${VERSION}_${TARGET_NAME}.deb + + # Build rpm package if applicable + if [ -n "${{ matrix.target_name_rpm }}" ]; then + echo "Building rpm package" + cargo generate-rpm --target $TARGET --target-dir $TARGET_DIR + mv $TARGET_DIR/$TARGET/generate-rpm/vpncloud-${VERSION}-1.${{ matrix.target_name_rpm }}.rpm dist/vpncloud_${VERSION}-1.${{ matrix.target_name_rpm }}.rpm + fi + + # Build static binary with musl + echo "Compiling for $TARGET_NAME musl" + cross build --release --features installer --target $MUSL_TARGET --target-dir $MUSL_DIR + upx --lzma $MUSL_DIR/$MUSL_TARGET/release/vpncloud + cp $MUSL_DIR/$MUSL_TARGET/release/vpncloud dist/vpncloud_${VERSION}_static_${TARGET_NAME} + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: packages-${{ matrix.target_name }} + path: dist/ + + release: + name: Create Release + needs: build + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get version + id: version + run: | + VERSION=$(grep -e '^version =' Cargo.toml | sed -e 's/version = "\(.*\)"/\1/') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + 
path: artifacts + + - name: Prepare release assets + run: | + VERSION=${{ steps.version.outputs.version }} + mkdir -p release + + # Copy all packages to release directory + find artifacts -name "*.deb" -exec cp {} release/ \; + find artifacts -name "*.rpm" -exec cp {} release/ \; + find artifacts -name "*static*" -exec cp {} release/ \; + + # Generate SHA256 checksums + cd release + sha256sum * > vpncloud_${VERSION}_SHA256SUMS.txt + + - name: Check GPG key availability + id: gpg_check + run: | + if [ -n "${{ secrets.GPG_PRIVATE_KEY }}" ]; then + echo "available=true" >> $GITHUB_OUTPUT + else + echo "available=false" >> $GITHUB_OUTPUT + echo "::notice::GPG_PRIVATE_KEY not configured, skipping signature" + fi + + - name: Import GPG key + if: steps.gpg_check.outputs.available == 'true' + uses: crazy-max/ghaction-import-gpg@v6 + with: + gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} + passphrase: ${{ secrets.GPG_PASSPHRASE }} + + - name: Sign checksums + if: steps.gpg_check.outputs.available == 'true' + run: | + VERSION=${{ steps.version.outputs.version }} + cd release + gpg --armor --output vpncloud_${VERSION}_SHA256SUMS.txt.asc --detach-sig vpncloud_${VERSION}_SHA256SUMS.txt + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + name: VpnCloud ${{ steps.version.outputs.version }} + draft: false + prerelease: false + generate_release_notes: true + files: | + release/*.deb + release/*.rpm + release/*static* + release/*SHA256SUMS* + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + publish-crate: + name: Publish to crates.io + needs: release + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Check crates.io token + id: crate_check + run: | + if [ -n "${{ secrets.CARGO_REGISTRY_TOKEN }}" ]; then + echo "available=true" >> $GITHUB_OUTPUT + else + echo "available=false" >> $GITHUB_OUTPUT + echo 
"::notice::CARGO_REGISTRY_TOKEN not configured, skipping crates.io publish" + fi + + - name: Publish to crates.io + if: steps.crate_check.outputs.available == 'true' + run: cargo publish + env: + CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} diff --git a/src/cloud.rs b/src/cloud.rs index 22f786da..6800101b 100644 --- a/src/cloud.rs +++ b/src/cloud.rs @@ -101,7 +101,7 @@ impl GenericCloud, stats_file: Option, - ) -> Self { + ) -> Result { let (learning, broadcast) = match config.mode { Mode::Normal => match config.device_type { Type::Tap => (true, true), @@ -131,7 +131,7 @@ impl GenericCloud GenericCloud GenericCloud Result<(), Error> { debug!("Broadcasting message type {}, {:?} bytes to {} peers", type_, msg.len(), self.peers.len()); + // Reuse a single buffer for all peers to avoid repeated allocations let mut msg_data = MsgBuffer::new(100); for (addr, peer) in &mut self.peers { + // Reset buffer to original message state for each peer msg_data.set_start(msg.get_start()); msg_data.set_length(msg.len()); msg_data.message_mut().clone_from_slice(msg.message()); diff --git a/src/crypto/common.rs b/src/crypto/common.rs index 0e2b7abd..f720c486 100644 --- a/src/crypto/common.rs +++ b/src/crypto/common.rs @@ -34,6 +34,7 @@ const SPEED_TEST_TIME: f32 = 0.02; #[cfg(not(test))] const SPEED_TEST_TIME: f32 = 0.1; +/// Interval in seconds for symmetric key rotation (2 minutes) const ROTATE_INTERVAL: usize = 120; pub trait Payload: Debug + PartialEq + Sized { @@ -144,7 +145,7 @@ impl Crypto { Some(password) => { pbkdf2::derive( pbkdf2::PBKDF2_HMAC_SHA256, - NonZeroU32::new(4096).unwrap(), + NonZeroU32::new(600000).unwrap(), SALT, password.as_bytes(), &mut bytes, diff --git a/src/crypto/core.rs b/src/crypto/core.rs index 0c803945..b635466f 100644 --- a/src/crypto/core.rs +++ b/src/crypto/core.rs @@ -55,8 +55,11 @@ use std::{ use crate::{error::Error, util::MsgBuffer}; +/// Nonce length for AES-GCM and ChaCha20-Poly1305 (96 bits) const NONCE_LEN: usize = 12; +/// 
Authentication tag length for AES-GCM and ChaCha20-Poly1305 (128 bits) pub const TAG_LEN: usize = 16; +/// Extra overhead bytes reserved in encrypted messages — NOTE(review): this is 8, which equals neither NONCE_LEN (12) nor TAG_LEN (16) nor their sum; confirm what these bytes actually hold before documenting them as "nonce + tag" pub const EXTRA_LEN: usize = 8; fn random_data(size: usize) -> Vec { diff --git a/src/crypto/init.rs index dde999dd..67c2cc61 100644 --- a/src/crypto/init.rs +++ b/src/crypto/init.rs @@ -81,9 +81,13 @@ pub const STAGE_PENG: u8 = 3; pub const WAITING_TO_CLOSE: u8 = 4; pub const CLOSING: u8 = 5; +/// Maximum number of failed handshake retries before giving up (2 minutes at 1 retry/second) pub const MAX_FAILED_RETRIES: usize = 120; pub const SALTED_NODE_ID_HASH_LEN: usize = 20; + +/// Maximum field length in init messages to prevent DoS attacks +const MAX_FIELD_LENGTH: usize = 65536; // 64KB pub type SaltedNodeIdHash = [u8; SALTED_NODE_ID_HASH_LEN]; #[allow(clippy::large_enum_variant)] @@ -221,6 +225,9 @@ impl InitMsg { algorithms = Some(Algorithms { algorithm_speeds: algos, allow_unencrypted }); } _ => { + if field_len > MAX_FIELD_LENGTH { + return Err(Error::Parse("Field length exceeds maximum allowed size")); + } let mut data = vec![0; field_len]; r.read_exact(&mut data).map_err(|_| Error::Parse("Init message too short"))?; } @@ -459,10 +466,8 @@ impl InitState

{ } fn derive_master_key(&self, algo: &'static Algorithm, privk: EcdhPrivateKey, pubk: &EcdhPublicKey) -> LessSafeKey { - agree_ephemeral(privk, pubk, |k| { - UnboundKey::new(algo, &k[..algo.key_len()]).map(LessSafeKey::new).unwrap() - }) - .unwrap() + agree_ephemeral(privk, pubk, |k| UnboundKey::new(algo, &k[..algo.key_len()]).map(LessSafeKey::new).unwrap()) + .unwrap() } fn create_ecdh_keypair(&self) -> (EcdhPrivateKey, EcdhPublicKey) { diff --git a/src/device.rs b/src/device.rs index e76bb01f..cc435c43 100644 --- a/src/device.rs +++ b/src/device.rs @@ -10,9 +10,9 @@ use std::{ fs::{self, File}, io::{self, BufRead, BufReader, Cursor, Error as IoError, Read, Write}, net::{Ipv4Addr, UdpSocket}, - os::{unix::io::AsRawFd, fd::RawFd}, + os::{fd::RawFd, unix::io::AsRawFd}, str, - str::FromStr + str::FromStr, }; use crate::{crypto, error::Error, util::MsgBuffer}; @@ -23,7 +23,7 @@ static TUNSETIFF: libc::c_ulong = 1074025674; #[derive(Copy, Clone)] struct IfReqDataAddr { af: libc::c_int, - addr: Ipv4Addr + addr: Ipv4Addr, } #[repr(C)] diff --git a/src/main.rs b/src/main.rs index b1d58446..6e496890 100644 --- a/src/main.rs +++ b/src/main.rs @@ -190,8 +190,10 @@ fn run(config: Config, socket: S) { Some(file) } }; - let mut cloud = - GenericCloud::::new(&config, socket, device, port_forwarding, stats_file); + let mut cloud = try_fail!( + GenericCloud::::new(&config, socket, device, port_forwarding, stats_file), + "Failed to create VPN cloud: {}" + ); for mut addr in config.peers { if addr.find(':').unwrap_or(0) <= addr.find(']').unwrap_or(0) { // : not present or only in IPv6 address diff --git a/src/payload.rs b/src/payload.rs index d59d40b4..8a38e83d 100644 --- a/src/payload.rs +++ b/src/payload.rs @@ -3,7 +3,6 @@ // This software is licensed under GPL-3 or newer (see LICENSE.md) use crate::{error::Error, types::Address}; -use std::io::{Cursor, Read}; pub trait Protocol: Sized { fn parse(_: &[u8]) -> Result<(Address, Address), Error>; @@ -23,20 +22,26 @@ impl Protocol 
for Frame { /// # Errors /// This method will fail when the given data is not a valid ethernet frame. fn parse(data: &[u8]) -> Result<(Address, Address), Error> { - // HOT PATH - let mut cursor = Cursor::new(data); + // HOT PATH - Direct array indexing for better performance + if data.len() < 14 { + return Err(Error::Parse("Frame is too short")); + } let mut src = [0; 16]; let mut dst = [0; 16]; - let mut proto = [0; 2]; - cursor - .read_exact(&mut dst[..6]) - .and_then(|_| cursor.read_exact(&mut src[..6])) - .and_then(|_| cursor.read_exact(&mut proto)) - .map_err(|_| Error::Parse("Frame is too short"))?; - if proto == [0x81, 0x00] { + // Copy destination MAC (bytes 0-5) + dst[..6].copy_from_slice(&data[0..6]); + // Copy source MAC (bytes 6-11) + src[..6].copy_from_slice(&data[6..12]); + // Check for VLAN tag (bytes 12-13) + if data[12] == 0x81 && data[13] == 0x00 { + if data.len() < 18 { + return Err(Error::Parse("Vlan frame is too short")); + } + // Shift MAC addresses to make room for VLAN tag src.copy_within(..6, 2); dst.copy_within(..6, 2); - cursor.read_exact(&mut src[..2]).map_err(|_| Error::Parse("Vlan frame is too short"))?; + // Read VLAN tag (bytes 14-15) + src[..2].copy_from_slice(&data[14..16]); src[0] &= 0x0f; // restrict vlan id to 12 bits dst[..2].copy_from_slice(&src[..2]); if src[0..1] == [0, 0] { diff --git a/src/table.rs b/src/table.rs index f91aecee..995eaf02 100644 --- a/src/table.rs +++ b/src/table.rs @@ -25,17 +25,104 @@ struct ClaimEntry { timeout: Time, } +/// A node in the binary trie for CIDR-based claim lookup +struct TrieNode { + /// Claim entry at this node (if this node represents a complete prefix) + claim: Option, // Index into claims vector + /// Child nodes: [0] for bit 0, [1] for bit 1 + children: [Option>; 2], +} + +impl TrieNode { + fn new() -> Self { + Self { claim: None, children: [None, None] } + } +} + +/// A binary trie for O(prefix_len) CIDR-based claim lookups +struct ClaimTrie { + root: TrieNode, +} + +impl ClaimTrie { 
+ fn new() -> Self { + Self { root: TrieNode::new() } + } + + /// Insert a claim with its index in the claims vector + fn insert(&mut self, range: &Range, claim_idx: usize) { + let addr = &range.base; + let prefix_len = range.prefix_len as usize; + let max_bits = (addr.len as usize) * 8; + let bits_to_check = std::cmp::min(prefix_len, max_bits); + + let mut node = &mut self.root; + for bit_idx in 0..bits_to_check { + let byte_idx = bit_idx / 8; + let bit_pos = 7 - (bit_idx % 8); // MSB first + let bit = ((addr.data[byte_idx] >> bit_pos) & 1) as usize; + + if node.children[bit].is_none() { + node.children[bit] = Some(Box::new(TrieNode::new())); + } + node = node.children[bit].as_mut().unwrap(); + } + node.claim = Some(claim_idx); + } + + /// Find the longest prefix match for the given address + /// Returns the index into the claims vector + fn longest_match(&self, addr: &Address) -> Option { + let mut node = &self.root; + let mut best_match = None; + let max_bits = (addr.len as usize) * 8; + + if node.claim.is_some() { + best_match = node.claim; + } + + for bit_idx in 0..max_bits { + let byte_idx = bit_idx / 8; + let bit_pos = 7 - (bit_idx % 8); // MSB first + let bit = ((addr.data[byte_idx] >> bit_pos) & 1) as usize; + + match &node.children[bit] { + Some(child) => { + node = child; + if node.claim.is_some() { + best_match = node.claim; + } + } + None => break, + } + } + best_match + } + + fn clear(&mut self) { + self.root = TrieNode::new(); + } +} + pub struct ClaimTable { cache: HashMap, cache_timeout: Duration, claims: Vec, claim_timeout: Duration, + trie: ClaimTrie, _dummy: PhantomData, } impl ClaimTable { pub fn new(cache_timeout: Duration, claim_timeout: Duration) -> Self { - Self { cache: HashMap::default(), cache_timeout, claims: vec![], claim_timeout, _dummy: PhantomData } + Self { + cache: HashMap::default(), + cache_timeout, + claims: vec![], + claim_timeout, + trie: ClaimTrie::new(), + _dummy: PhantomData, + } } pub fn cache(&mut self, addr: Address, 
peer: SocketAddr) { @@ -96,16 +183,9 @@ impl ClaimTable { if let Some(entry) = self.cache.get(&addr) { return Some(entry.peer); } - // COLD PATH - let mut found = None; - let mut prefix_len = -1; - for entry in &self.claims { - if entry.claim.prefix_len as isize > prefix_len && entry.claim.matches(addr) { - found = Some(entry); - prefix_len = entry.claim.prefix_len as isize; - } - } - if let Some(entry) = found { + // COLD PATH - Use trie for O(prefix_len) lookup instead of O(n) linear scan + if let Some(claim_idx) = self.trie.longest_match(&addr) { + let entry = &self.claims[claim_idx]; self.cache.insert( addr, CacheValue { peer: entry.peer, timeout: min(TS::now() + self.cache_timeout as Time, entry.timeout) }, @@ -119,6 +199,16 @@ impl ClaimTable { let now = TS::now(); self.cache.retain(|_, v| v.timeout >= now); self.claims.retain(|e| e.timeout >= now); + // Rebuild trie after claims cleanup + self.rebuild_trie(); + } + + /// Rebuild the trie from the current claims vector + fn rebuild_trie(&mut self) { + self.trie.clear(); + for (idx, entry) in self.claims.iter().enumerate() { + self.trie.insert(&entry.claim, idx); + } } pub fn cache_len(&self) -> usize { diff --git a/src/tests/mod.rs b/src/tests/mod.rs index 16e2f860..a8c1901f 100644 --- a/src/tests/mod.rs +++ b/src/tests/mod.rs @@ -5,4 +5,4 @@ mod common; mod nat; mod payload; -mod peers; \ No newline at end of file +mod peers; diff --git a/src/traffic.rs b/src/traffic.rs index 2d98f26a..84b98e25 100644 --- a/src/traffic.rs +++ b/src/traffic.rs @@ -81,26 +81,44 @@ pub struct TrafficStats { impl TrafficStats { #[inline] pub fn count_out_traffic(&mut self, peer: SocketAddr, bytes: usize) { - // HOT PATH - self.peers.entry(peer).or_default().count_out(bytes); + // HOT PATH - Use get_mut first for fast path, then entry only on cache miss + if let Some(entry) = self.peers.get_mut(&peer) { + entry.count_out(bytes); + } else { + self.peers.entry(peer).or_default().count_out(bytes); + } } #[inline] pub fn 
count_in_traffic(&mut self, peer: SocketAddr, bytes: usize) { - // HOT PATH - self.peers.entry(peer).or_default().count_in(bytes); + // HOT PATH - Use get_mut first for fast path, then entry only on cache miss + if let Some(entry) = self.peers.get_mut(&peer) { + entry.count_in(bytes); + } else { + self.peers.entry(peer).or_default().count_in(bytes); + } } #[inline] pub fn count_out_payload(&mut self, remote: Address, local: Address, bytes: usize) { - // HOT PATH - self.payload.entry((remote, local)).or_default().count_out(bytes); + // HOT PATH - Use get_mut first for fast path, then entry only on cache miss + let key = (remote, local); + if let Some(entry) = self.payload.get_mut(&key) { + entry.count_out(bytes); + } else { + self.payload.entry(key).or_default().count_out(bytes); + } } #[inline] pub fn count_in_payload(&mut self, remote: Address, local: Address, bytes: usize) { - // HOT PATH - self.payload.entry((remote, local)).or_default().count_in(bytes); + // HOT PATH - Use get_mut first for fast path, then entry only on cache miss + let key = (remote, local); + if let Some(entry) = self.payload.get_mut(&key) { + entry.count_in(bytes); + } else { + self.payload.entry(key).or_default().count_in(bytes); + } } pub fn count_invalid_protocol(&mut self, bytes: usize) { diff --git a/src/util.rs b/src/util.rs index ef1b6f02..9b2caf75 100644 --- a/src/util.rs +++ b/src/util.rs @@ -24,14 +24,14 @@ pub type Time = i64; #[derive(Clone)] pub struct MsgBuffer { space_before: usize, - buffer: [u8; 65535], + buffer: Box<[u8; 65535]>, start: usize, end: usize, } impl MsgBuffer { pub fn new(space_before: usize) -> Self { - Self { buffer: [0; 65535], space_before, start: space_before, end: space_before } + Self { buffer: Box::new([0; 65535]), space_before, start: space_before, end: space_before } } pub fn get_start(&self) -> usize { diff --git a/src/wsproxy.rs b/src/wsproxy.rs index 0281cfac..8b3f074c 100644 --- a/src/wsproxy.rs +++ b/src/wsproxy.rs @@ -15,7 +15,12 @@ use std::{ 
os::unix::io::{AsRawFd, RawFd}, thread::spawn, }; -use tungstenite::{connect, protocol::WebSocket, Message, accept, stream::{MaybeTlsStream, NoDelay}}; +use tungstenite::{ + accept, connect, + protocol::WebSocket, + stream::{MaybeTlsStream, NoDelay}, + Message, +}; use url::Url; macro_rules! io_error { @@ -126,7 +131,7 @@ impl AsRawFd for ProxyConnection { fn as_raw_fd(&self) -> RawFd { match self.socket.get_ref() { MaybeTlsStream::Plain(stream) => stream.as_raw_fd(), - _ => unimplemented!() + _ => unimplemented!(), } } }