diff --git a/Cargo.lock b/Cargo.lock index b7ef7b6..30ca745 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -838,8 +838,9 @@ dependencies = [ [[package]] name = "linkem" -version = "0.2.0" +version = "0.2.1" dependencies = [ + "bytes", "futures", "msg-socket", "msg-transport", diff --git a/linkem/Cargo.toml b/linkem/Cargo.toml index f1e5567..614f6c6 100644 --- a/linkem/Cargo.toml +++ b/linkem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "linkem" -version = "0.2.0" +version = "0.2.1" description = "A realistic network emulation library" readme = "README.md" edition.workspace = true @@ -29,6 +29,7 @@ futures.workspace = true rtnetlink = { version = "0.20.0" } [dev-dependencies] +bytes.workspace = true tracing-subscriber = { version = "0.3", features = ["env-filter"] } tokio = { version = "1", features = [ "rt-multi-thread", @@ -38,6 +39,9 @@ tokio = { version = "1", features = [ ] } +[[example]] +name = "pubsub_latency_probe" + [[example]] name = "bdp_throughput" diff --git a/linkem/examples/pubsub_latency_probe.rs b/linkem/examples/pubsub_latency_probe.rs new file mode 100644 index 0000000..3d44831 --- /dev/null +++ b/linkem/examples/pubsub_latency_probe.rs @@ -0,0 +1,288 @@ +//! Pub/Sub Latency Probe Example +//! +//! Creates four network namespaces: one publisher and three subscribers, each with +//! different network impairments. The publisher sends timestamped messages at a fixed +//! rate on a topic, and each subscriber logs the one-way delay for every message. +//! +//! # Running +//! +//! ```bash +//! sudo HOME=$HOME RUST_LOG=info $(which cargo) run --example pubsub_latency_probe -p linkem +//! 
``` + +#[cfg(not(target_os = "linux"))] +fn main() {} + +#[cfg(target_os = "linux")] +#[tokio::main] +async fn main() -> Result<(), Box<dyn std::error::Error>> { + use std::{ + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::{Duration, Instant}, + }; + + use bytes::Bytes; + use futures::StreamExt; + use linkem::{ + ip::Subnet, + network::{Link, Network, PeerIdExt}, + tc::impairment::LinkImpairment, + }; + use msg_socket::{PubSocket, SubSocket}; + use msg_transport::tcp::Tcp; + use tracing_subscriber::EnvFilter; + + /// Latency stats collected by each subscriber. + struct Stats { + name: &'static str, + count: u64, + total: Duration, + min: Duration, + max: Duration, + } + + /// Spawn a subscriber in the given namespace. Returns a future that resolves to [`Stats`]. + #[allow(clippy::too_many_arguments)] + async fn spawn_subscriber( + network: &Network, + sub_id: usize, + pub_addr: IpAddr, + port: u16, + topic: &'static str, + total_messages: u64, + epoch: Instant, + name: &'static str, + ) -> Result< + impl std::future::Future<Output = Result<Stats, linkem::network::Error>>, + linkem::network::Error, + > { + network + .run_in_namespace(sub_id, move |_| { + Box::pin(async move { + let mut sub_socket = SubSocket::new(Tcp::default()); + sub_socket.connect(SocketAddr::new(pub_addr, port)).await.unwrap(); + sub_socket.subscribe(topic).await.unwrap(); + + let mut count = 0u64; + let mut total_delay = Duration::ZERO; + let mut min_delay = Duration::MAX; + let mut max_delay = Duration::ZERO; + + loop { + match tokio::time::timeout(Duration::from_secs(5), sub_socket.next()).await + { + Ok(Some(msg)) => { + let now = epoch.elapsed(); + let payload = msg.payload(); + + if payload.len() != 16 { + continue; + } + + let secs = u64::from_be_bytes(payload[..8].try_into().unwrap()); + let nanos = u64::from_be_bytes(payload[8..16].try_into().unwrap()); + let sent_at = Duration::new(secs, nanos as u32); + + let delay = now.saturating_sub(sent_at); + count += 1; + total_delay += delay; + if delay < min_delay { + min_delay = delay; + } + if delay > max_delay 
{ + max_delay = delay; + } + + tracing::info!( + name, + seq = count, + delay_ms = format_args!("{:.2}", delay.as_secs_f64() * 1000.0), + "received" + ); + + if count == total_messages { + break; + } + } + Ok(None) => break, + Err(_) => break, + } + } + + Stats { name, count, total: total_delay, min: min_delay, max: max_delay } + }) + }) + .await + } + + tracing_subscriber::fmt() + .with_env_filter( + EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")), + ) + .init(); + + // --- Configuration --- + const MESSAGES_PER_SECOND: u64 = 1000; + const TOTAL_MESSAGES: u64 = 1000; + const PORT: u16 = 9900; + const TOPIC: &str = "latency"; + + let impairments: [(&str, LinkImpairment); 3] = [ + ( + "local", + LinkImpairment { + latency: 1_000, // 1ms + jitter: 500, // 0.5ms + ..Default::default() + }, + ), + ( + "regional", + LinkImpairment { + latency: 50_000, // 50ms + jitter: 10_000, // 10ms + ..Default::default() + }, + ), + ( + "overseas", + LinkImpairment { + latency: 120_000, // 120ms + jitter: 20_000, // 20ms + loss: 0.5, // 0.5% loss + ..Default::default() + }, + ), + ]; + + println!("\n=== Pub/Sub Latency Probe (1 pub, 3 subs) ===\n"); + println!(" Rate: {} msg/s", MESSAGES_PER_SECOND); + println!(" Count: {} messages\n", TOTAL_MESSAGES); + println!(" Subscribers:"); + for (name, imp) in &impairments { + println!( + " {:<10} {}ms latency, {}ms jitter, {:.1}% loss", + name, + imp.latency / 1000, + imp.jitter / 1000, + imp.loss + ); + } + println!(); + + // --- Network setup --- + let subnet = Subnet::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 0)), 24); + let mut network = Network::new(subnet).await?; + + let pub_id = network.add_peer().await?; + let pub_addr = pub_id.veth_address(subnet); + println!(" Publisher: peer {} @ {}", pub_id, pub_addr); + + let mut sub_ids: Vec<(usize, &str)> = Vec::new(); + for (name, imp) in &impairments { + let sub_id = network.add_peer().await?; + network.apply_impairment(Link::new(pub_id, sub_id), *imp).await?; + 
println!(" Sub [{}]: peer {} @ {}", name, sub_id, sub_id.veth_address(subnet)); + sub_ids.push((sub_id, name)); + } + println!(); + + let epoch = Instant::now(); + + // --- Publisher --- + let publisher = network + .run_in_namespace(pub_id, move |_| { + Box::pin(async move { + let mut pub_socket = PubSocket::new(Tcp::default()); + pub_socket.bind(SocketAddr::new(pub_addr, PORT)).await.unwrap(); + + // Wait for all subscribers to connect and subscribe + tokio::time::sleep(Duration::from_secs(1)).await; + + let interval = Duration::from_micros(1_000_000 / MESSAGES_PER_SECOND); + + for seq in 1..=TOTAL_MESSAGES { + let now = epoch.elapsed(); + let mut payload = [0u8; 16]; + payload[..8].copy_from_slice(&now.as_secs().to_be_bytes()); + payload[8..16].copy_from_slice(&(now.subsec_nanos() as u64).to_be_bytes()); + + pub_socket.publish(TOPIC, Bytes::from(payload.to_vec())).await.unwrap(); + tracing::info!(seq, "published"); + + if seq < TOTAL_MESSAGES { + tokio::time::sleep(interval).await; + } + } + + // Give subscribers time to drain + tokio::time::sleep(Duration::from_secs(5)).await; + }) + }) + .await?; + + // Small delay before subscribers connect + tokio::time::sleep(Duration::from_millis(100)).await; + + // --- Subscribers --- + let (sub_1_id, sub_1_name) = sub_ids[0]; + let (sub_2_id, sub_2_name) = sub_ids[1]; + let (sub_3_id, sub_3_name) = sub_ids[2]; + + let sub_1 = spawn_subscriber( + &network, + sub_1_id, + pub_addr, + PORT, + TOPIC, + TOTAL_MESSAGES, + epoch, + sub_1_name, + ) + .await?; + let sub_2 = spawn_subscriber( + &network, + sub_2_id, + pub_addr, + PORT, + TOPIC, + TOTAL_MESSAGES, + epoch, + sub_2_name, + ) + .await?; + let sub_3 = spawn_subscriber( + &network, + sub_3_id, + pub_addr, + PORT, + TOPIC, + TOTAL_MESSAGES, + epoch, + sub_3_name, + ) + .await?; + + // Wait for publisher to finish, then collect subscriber results + publisher.await?; + let (r1, r2, r3) = tokio::join!(sub_1, sub_2, sub_3); + + // --- Summary --- + println!("\n=== Results 
===\n"); + for stats in [r1?, r2?, r3?] { + println!(" [{}]", stats.name); + println!(" Received: {}/{} messages", stats.count, TOTAL_MESSAGES); + if stats.count > 0 { + let avg = stats.total / stats.count as u32; + println!( + " Latency: avg {:.2}ms, min {:.2}ms, max {:.2}ms", + avg.as_secs_f64() * 1000.0, + stats.min.as_secs_f64() * 1000.0, + stats.max.as_secs_f64() * 1000.0 + ); + } + } + println!(); + + Ok(()) +} diff --git a/linkem/src/network.rs b/linkem/src/network.rs index ab9fd30..4103bf8 100644 --- a/linkem/src/network.rs +++ b/linkem/src/network.rs @@ -48,7 +48,7 @@ //! - 10ms latency + 100 Mbit/s to peer 2 //! - 200ms latency + 5% loss to peer 3 //! -//! This is achieved using a DRR (Deficit Round Robin) qdisc with per-destination +//! This is achieved using an HTB (Hierarchical Token Bucket) qdisc with per-destination //! classes. See the [`crate::tc`] module for details on the qdisc hierarchy. use std::{ @@ -72,9 +72,9 @@ use tokio::{ }; use tracing::Instrument as _; -use crate::tc::requests::configure_drr_class; +use crate::tc::requests::configure_htb_class; use crate::tc::requests::configure_tbf; -use crate::tc::requests::{configure_flower_filter, configure_netem, install_drr_root}; +use crate::tc::requests::{configure_flower_filter, configure_netem, install_htb_root}; use crate::{ dynch::DynFuture, ip::{IpAddrExt as _, Subnet}, @@ -171,15 +171,15 @@ impl Display for Link { /// outgoing interface, enabling us to add/remove per-destination impairments. #[derive(Debug, Default)] struct PeerTcState { - /// Whether the drr root qdisc has been installed on this peer's veth. + /// Whether the HTB root qdisc has been installed on this peer's veth. /// - /// The drr qdisc is installed lazily on first `apply_impairment()` call. - drr_installed: bool, + /// The HTB qdisc is installed lazily on first `apply_impairment()` call. + htb_installed: bool, /// Set of destination peer IDs that have impairments configured. 
/// /// For each destination in this set, we have created: - /// - An drr class (for traffic classification) + /// - An HTB class (for traffic classification) /// - Optionally a TBF qdisc (for bandwidth limiting) /// - A netem qdisc (for delay, loss, etc.) /// - A flower filter (to match destination IP) @@ -692,12 +692,12 @@ impl Network { /// /// # Traffic Control Hierarchy /// - /// On first call for a peer, this installs an drr root qdisc. Then for each + /// On first call for a peer, this installs an HTB root qdisc. Then for each /// destination, it creates: /// /// ```text - /// drr root (1:0) - /// └── drr class (1:10+X) for destination peer X + /// HTB root (1:0) + /// └── HTB class (1:10+X) for destination peer X /// └── TBF (10+X:1) [if bandwidth limiting enabled] /// └── netem (10+X:0) [delay, loss, jitter] /// @@ -758,7 +758,7 @@ impl Network { let is_replacement = tc_state.has_impairment_to(link.destination()); let dst_peer_id = dst_peer.id; - let drr_already_installed = tc_state.drr_installed; + let htb_already_installed = tc_state.htb_installed; let subnet = self.subnet; // Execute the TC configuration in the source peer's namespace. @@ -780,13 +780,13 @@ impl Network { .expect("to find dev") .get() as i32; - // Step 1: Install DRR root qdisc if not already present. - if !drr_already_installed { - install_drr_root(&mut ctx.handle, if_index).await?; + // Step 1: Install HTB root qdisc if not already present. + if !htb_already_installed { + install_htb_root(&mut ctx.handle, if_index).await?; } - // Step 2: Create or replace DRR class for this destination. - configure_drr_class(&mut ctx.handle, if_index, dst_peer_id, is_replacement) + // Step 2: Create or replace HTB class for this destination. + configure_htb_class(&mut ctx.handle, if_index, dst_peer_id, is_replacement) .await?; // Step 3: Create or replace TBF qdisc if bandwidth limiting is enabled. @@ -829,7 +829,7 @@ impl Network { // Update state tracking after successful configuration. 
let tc_state = self.tc_state.get_mut(&link.source()).unwrap(); - tc_state.drr_installed = true; + tc_state.htb_installed = true; tc_state.mark_configured(link.destination()); Ok(()) diff --git a/linkem/src/tc/drr.rs b/linkem/src/tc/drr.rs deleted file mode 100644 index 9029818..0000000 --- a/linkem/src/tc/drr.rs +++ /dev/null @@ -1,158 +0,0 @@ -//! DRR (Deficit Round Robin) qdisc and class support. - -use rtnetlink::packet_core::{ - NLM_F_ACK, NLM_F_CREATE, NLM_F_EXCL, NLM_F_REPLACE, NLM_F_REQUEST, NetlinkMessage, -}; -use rtnetlink::packet_route::{ - RouteNetlinkMessage, - tc::{TcAttribute, TcHandle, TcMessage}, -}; - -use super::handle::QdiscRequestInner; -use super::nla::{build_nested_options, build_nla}; - -// DRR-specific TCA_OPTIONS sub-attributes (from linux/pkt_sched.h) -/// DRR class quantum attribute type. -const TCA_DRR_QUANTUM: u16 = 1; - -/// The default quantum for DRR classes, in bytes. -/// -/// With a large quantum (4GiB of [`u32::MAX`]), DRR effectively becomes a pure packet router—when -/// a class's turn comes, it drains its queue entirely before moving to the next class. This is -/// desirable because we use DRR only for classification; actual rate limiting is done by TBF. -/// -/// The quantum must be at least as large as the maximum packet size (MTU) to ensure packets -/// can always be dequeued. 1MB is large enough to handle any reasonable packet while still -/// being well within safe bounds. -pub const DRR_DEFAULT_QUANTUM: u32 = u32::MAX; // 4GiB - -/// Builder for creating a DRR (Deficit Round Robin) root qdisc. -/// -/// DRR is our root qdisc, chosen because it allows an unlimited number of classes -/// to be created dynamically with minimal overhead. Each class can have its own -/// qdisc chain (TBF → netem) for per-destination impairments. -/// -/// Unlike HTB, DRR doesn't impose bandwidth shaping semantics at the root level. -/// With a large quantum, it acts as a pure packet classifier/router. 
-#[derive(Debug, Clone)] -pub struct QdiscDrrRequest { - pub inner: QdiscRequestInner, -} - -impl QdiscDrrRequest { - /// Create a new DRR qdisc request for the given interface. - /// - /// The qdisc will be created at the root with handle 1:0. - pub fn new(inner: QdiscRequestInner) -> Self { - Self { inner } - } - - /// Build the netlink message to create this DRR qdisc. - /// - /// DRR qdiscs are simple—they require no special options at creation time. - /// The quantum is specified per-class, not at the qdisc level. - pub fn build(self) -> NetlinkMessage<RouteNetlinkMessage> { - let mut tc_message = TcMessage::with_index(self.inner.interface_index); - tc_message.header.parent = TcHandle::ROOT; - tc_message.header.handle = TcHandle::from(0x0001_0000); // 1:0 - - tc_message.attributes.push(TcAttribute::Kind("drr".to_string())); - - let mut nl_req = NetlinkMessage::from(RouteNetlinkMessage::NewQueueDiscipline(tc_message)); - // NLM_F_REPLACE allows updating an existing qdisc - nl_req.header.flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST | NLM_F_ACK; - - nl_req - } -} - -/// Builder for creating a DRR class. -/// -/// Each destination peer gets its own DRR class, which serves as the attachment -/// point for the TBF and netem qdiscs that implement the actual impairments. -/// -/// DRR classes are simple—they only have a `quantum` parameter that controls how -/// many bytes can be sent per scheduling round. With a large quantum (default 1MB), -/// the class effectively drains its entire queue each time it's scheduled, making -/// DRR act as a pure classifier rather than a fair scheduler. 
-/// -/// # Handle Scheme -/// -/// For a destination peer with ID `N`: -/// - Class handle: `1:(10 + N)` (e.g., peer 2 → class 1:12) -/// -/// # Example -/// -/// ``` -/// use linkem::tc::handle::{ID_OFFSET, QdiscRequestInner}; -/// use linkem::tc::drr::DrrClassRequest; -/// use rtnetlink::packet_route::tc::TcHandle; -/// -/// let if_index = 1; // Network interface index -/// // Create class for traffic to peer 2 -/// let class_minor = ID_OFFSET + 2; // 12 -/// let request = DrrClassRequest::new( -/// QdiscRequestInner::new(if_index) -/// .with_parent(TcHandle::from(0x0001_0000)) // Parent is DRR root (1:0) -/// .with_handle(TcHandle::from((1 << 16) | class_minor)), // 1:12 -/// ).build(); -/// ``` -#[derive(Debug, Clone)] -pub struct DrrClassRequest { - pub inner: QdiscRequestInner, - /// The quantum for this class in bytes. - /// - /// This determines how many bytes can be sent per scheduling round. - /// With a large value (default [`DRR_DEFAULT_QUANTUM`]), the class - /// drains its entire queue each time it's scheduled. - pub quantum: u32, - /// If true, replace an existing class instead of failing if it exists. - pub replace: bool, -} - -impl DrrClassRequest { - /// Create a new DRR class request with default quantum. - pub fn new(inner: QdiscRequestInner) -> Self { - Self { inner, quantum: DRR_DEFAULT_QUANTUM, replace: false } - } - - /// Set a custom quantum for this class. - /// - /// The quantum determines how many bytes can be sent per scheduling round. - /// Must be at least as large as the maximum packet size (MTU). - pub fn with_quantum(mut self, quantum: u32) -> Self { - self.quantum = quantum; - self - } - - /// Set whether to replace an existing class. - /// - /// When `true`, uses `NLM_F_REPLACE` to update an existing class. - /// When `false` (default), uses `NLM_F_EXCL` to fail if the class exists. 
- pub fn with_replace(mut self, replace: bool) -> Self { - self.replace = replace; - self - } - - /// Build the netlink message to create this DRR class. - pub fn build(self) -> NetlinkMessage<RouteNetlinkMessage> { - let mut tc_message = TcMessage::with_index(self.inner.interface_index); - tc_message.header.parent = self.inner.parent; - tc_message.header.handle = self.inner.handle; - - tc_message.attributes.push(TcAttribute::Kind("drr".to_string())); - - // DRR class options: just TCA_DRR_QUANTUM wrapped in TCA_OPTIONS - let quantum_nla = build_nla(TCA_DRR_QUANTUM, &self.quantum.to_ne_bytes()); - tc_message.attributes.push(TcAttribute::Other(build_nested_options(quantum_nla))); - - let mut nl_req = NetlinkMessage::from(RouteNetlinkMessage::NewTrafficClass(tc_message)); - nl_req.header.flags = if self.replace { - NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST | NLM_F_ACK - } else { - NLM_F_CREATE | NLM_F_EXCL | NLM_F_REQUEST | NLM_F_ACK - }; - - nl_req - } -} diff --git a/linkem/src/tc/filter.rs b/linkem/src/tc/filter.rs index 8ecd846..12250fe 100644 --- a/linkem/src/tc/filter.rs +++ b/linkem/src/tc/filter.rs @@ -46,11 +46,11 @@ fn ipv6_mask(prefix_len: u8) -> Ipv6Addr { /// /// Flower filters classify packets based on various criteria. We use them to /// match packets by destination IP address and route them to the appropriate -/// DRR class for impairment. +/// HTB class for impairment. /// /// # How Classification Works /// -/// 1. Packet enters DRR root qdisc +/// 1. Packet enters HTB root qdisc /// 2. Flower filter examines destination IP /// 3. If IP matches → packet goes to the specified class (e.g., 1:12) /// 4. 
If no match → packet goes to default class (1:1) @@ -67,7 +67,7 @@ fn ipv6_mask(prefix_len: u8) -> Ipv6Addr { /// // Route traffic to 10.0.0.2 into class 1:12 /// let request = FlowerFilterRequest::new( /// QdiscRequestInner::new(if_index) -/// .with_parent(TcHandle::from(0x0001_0000)), // Attach to DRR root +/// .with_parent(TcHandle::from(0x0001_0000)), // Attach to HTB root /// IpAddr::V4(Ipv4Addr::new(10, 0, 0, 2)), /// ) /// .with_class_id(0x0001_000C) // Route to class 1:12 @@ -251,10 +251,10 @@ impl TcU32Key { /// /// # Why This Is Needed /// -/// DRR (unlike HTB) doesn't have a built-in default class mechanism. Packets that -/// don't match any filter are dropped. To handle unclassified traffic (like ARP -/// packets, which don't have IP headers), we need a catch-all filter with a low -/// priority (high number) that catches everything and sends it to class 1:1. +/// While HTB has a built-in `defcls` default class mechanism, we add an explicit +/// catch-all filter as a safety net. This ensures unclassified traffic (like ARP +/// packets, which don't have IP headers) is reliably routed to class 1:1 regardless +/// of the root qdisc's default class handling. /// /// # Why u32 Instead of matchall /// @@ -281,7 +281,7 @@ impl TcU32Key { /// // Create a catch-all filter that sends unmatched traffic to class 1:1 /// let request = U32CatchallFilterRequest::new( /// QdiscRequestInner::new(if_index) -/// .with_parent(TcHandle::from(0x0001_0000)), // Attach to DRR root +/// .with_parent(TcHandle::from(0x0001_0000)), // Attach to HTB root /// ) /// .with_class_id(0x0001_0001) // Route to class 1:1 /// .build(); diff --git a/linkem/src/tc/handle.rs b/linkem/src/tc/handle.rs index ac13df5..2484d59 100644 --- a/linkem/src/tc/handle.rs +++ b/linkem/src/tc/handle.rs @@ -5,7 +5,7 @@ use rtnetlink::packet_route::tc::TcHandle; -/// The offset added to peer IDs to compute DRR class minor numbers. +/// The offset added to peer IDs to compute HTB class minor numbers. 
/// /// For peer ID `N`, the class minor/major (depending on the qdisc level) is `ID_OFFSET + N`. This /// keeps class 1:1 reserved as the default (unimpaired) class. @@ -47,7 +47,7 @@ impl QdiscRequestInner { } } -/// Compute the DRR class handle for a destination peer. +/// Compute the HTB class handle for a destination peer. /// /// # Handle Format /// @@ -56,10 +56,10 @@ impl QdiscRequestInner { /// # Example /// /// ``` -/// use linkem::tc::handle::drr_class_handle; -/// assert_eq!(drr_class_handle(2), 0x0001_000C); // 1:12 +/// use linkem::tc::handle::htb_class_handle; +/// assert_eq!(htb_class_handle(2), 0x0001_000C); // 1:12 /// ``` -pub fn drr_class_handle(dest_peer_id: usize) -> u32 { +pub fn htb_class_handle(dest_peer_id: usize) -> u32 { let minor = ID_OFFSET + dest_peer_id as u32; (1 << 16) | minor } diff --git a/linkem/src/tc/htb.rs b/linkem/src/tc/htb.rs new file mode 100644 index 0000000..50e2b11 --- /dev/null +++ b/linkem/src/tc/htb.rs @@ -0,0 +1,312 @@ +//! HTB (Hierarchical Token Bucket) qdisc and class support. +//! +//! HTB replaces DRR as the root classifier because it correctly handles +//! non-work-conserving child qdiscs (like netem). When netem returns NULL +//! during dequeue (delay hasn't elapsed), HTB moves to the next class via +//! `htb_next_rb_node()`, preventing head-of-line blocking that occurs with DRR. + +use rtnetlink::packet_core::{ + NLM_F_ACK, NLM_F_CREATE, NLM_F_EXCL, NLM_F_REPLACE, NLM_F_REQUEST, NetlinkMessage, +}; +use rtnetlink::packet_route::{ + RouteNetlinkMessage, + tc::{TcAttribute, TcHandle, TcMessage}, +}; + +use super::core::TICK_IN_USEC; +use super::handle::QdiscRequestInner; +use super::nla::{build_nested_options, build_nla}; +use super::tbf::TcRateSpec; + +// HTB-specific TCA_OPTIONS sub-attributes (from linux/pkt_sched.h) +/// HTB class parameters attribute type. +const TCA_HTB_PARMS: u16 = 1; +/// HTB qdisc initialization attribute type. +const TCA_HTB_INIT: u16 = 2; +/// HTB ceil rate table attribute type. 
+const TCA_HTB_CTAB: u16 = 3; +/// HTB rate table attribute type. +const TCA_HTB_RTAB: u16 = 4; + +/// HTB protocol version (current kernel version). +const HTB_VERSION: u32 = 3; + +/// Default rate-to-quantum conversion factor. +/// +/// Controls how HTB converts a class's rate to its quantum for deficit +/// round-robin among same-priority classes. The kernel computes: +/// `quantum = rate / rate2quantum`. +const HTB_RATE2QUANTUM: u32 = 10; + +/// Default class minor number for unclassified traffic. +/// +/// HTB's `defcls` parameter routes packets that don't match any filter +/// to this class (1:1), which has no impairments. +const HTB_DEFAULT_CLASS: u32 = 1; + +/// Effectively unlimited rate in bytes per second (~10 Gbit/s). +/// +/// Since we use HTB purely for classification (not rate limiting), +/// we set rate and ceil to ~10 Gbit/s which is effectively unlimited on veth. +/// This value (1,250,000,000) fits in u32, so `TCA_HTB_RATE64` is not needed. +const HTB_UNLIMITED_RATE_BPS: u32 = 1_250_000_000; + +/// Default burst size in bytes (1 MiB). +/// +/// Used for buffer/cbuffer tick calculations. Since we're not actually +/// rate-limiting at the HTB level, this just needs to be large enough +/// to avoid any token bucket starvation. +const HTB_DEFAULT_BURST_BYTES: u32 = 1024 * 1024; + +/// Default rate table (256 x 4-byte zero entries = 1024 bytes). +/// +/// Modern kernels compute rates internally. The zeroed table triggers +/// the kernel's `rtab[0] == 0` fast path in `__detect_linklayer()`, +/// returning `TC_LINKLAYER_ETHERNET` immediately. +const DEFAULT_RATE_TABLE: [u8; 1024] = [0u8; 1024]; + +/// The kernel's `tc_htb_glob` structure for HTB qdisc initialization. +/// +/// Passed inside `TCA_OPTIONS` -> `TCA_HTB_INIT` when creating the root qdisc. 
+/// +/// # Kernel Definition +/// +/// From `<linux/pkt_sched.h>`: +/// +/// ```c +/// struct tc_htb_glob { +/// __u32 version; /* HTB version */ +/// __u32 rate2quantum; /* Rate-to-quantum conversion */ +/// __u32 defcls; /* Default class minor number */ +/// __u32 debug; /* Debug flags */ +/// __u32 direct_pkts; /* Stats: packets sent directly (read-only) */ +/// }; +/// ``` +#[derive(Debug, Clone, Copy)] +struct HtbGlob { + version: u32, + rate2quantum: u32, + defcls: u32, + debug: u32, + direct_pkts: u32, +} + +impl HtbGlob { + fn as_bytes(self) -> Vec<u8> { + let mut vec = Vec::with_capacity(20); + vec.extend_from_slice(&self.version.to_ne_bytes()); + vec.extend_from_slice(&self.rate2quantum.to_ne_bytes()); + vec.extend_from_slice(&self.defcls.to_ne_bytes()); + vec.extend_from_slice(&self.debug.to_ne_bytes()); + vec.extend_from_slice(&self.direct_pkts.to_ne_bytes()); + vec + } +} + +impl Default for HtbGlob { + fn default() -> Self { + Self { + version: HTB_VERSION, + rate2quantum: HTB_RATE2QUANTUM, + defcls: HTB_DEFAULT_CLASS, + debug: 0, + direct_pkts: 0, + } + } +} + +/// The kernel's `tc_htb_opt` structure for HTB class configuration. 
+/// +/// # Kernel Definition +/// +/// From `<linux/pkt_sched.h>`: +/// +/// ```c +/// struct tc_htb_opt { +/// struct tc_ratespec rate; /* Guaranteed rate */ +/// struct tc_ratespec ceil; /* Ceiling rate */ +/// __u32 buffer; /* Burst size in ticks */ +/// __u32 cbuffer; /* Ceil burst size in ticks */ +/// __u32 quantum; /* Quantum for deficit round-robin (0 = auto) */ +/// __u32 level; /* Class level (0 = leaf) */ +/// __u32 prio; /* Priority (0 = highest) */ +/// }; +/// ``` +#[derive(Debug, Clone, Copy)] +struct HtbOpt { + rate: TcRateSpec, + ceil: TcRateSpec, + buffer: u32, + cbuffer: u32, + quantum: u32, + level: u32, + prio: u32, +} + +impl HtbOpt { + fn as_bytes(self) -> Vec<u8> { + let mut vec = Vec::with_capacity(44); // 2x12 + 5x4 + vec.extend_from_slice(&self.rate.to_bytes()); + vec.extend_from_slice(&self.ceil.to_bytes()); + vec.extend_from_slice(&self.buffer.to_ne_bytes()); + vec.extend_from_slice(&self.cbuffer.to_ne_bytes()); + vec.extend_from_slice(&self.quantum.to_ne_bytes()); + vec.extend_from_slice(&self.level.to_ne_bytes()); + vec.extend_from_slice(&self.prio.to_ne_bytes()); + vec + } +} + +/// Compute buffer ticks for HTB's effectively unlimited rate. +/// +/// Uses the iproute2 formula: +/// `buffer_ticks = burst_bytes * tick_in_usec * 1_000_000 / rate_bytes_per_sec` +fn compute_buffer_ticks() -> u32 { + let tick_in_usec = *TICK_IN_USEC; + (HTB_DEFAULT_BURST_BYTES as f64 * tick_in_usec * 1_000_000.0 + / HTB_UNLIMITED_RATE_BPS as f64) as u32 +} + +/// Builder for creating an HTB (Hierarchical Token Bucket) root qdisc. +/// +/// HTB is our root qdisc, chosen because it correctly handles non-work-conserving +/// child qdiscs (like netem with delay). When netem returns NULL during dequeue, +/// HTB's `htb_dequeue_tree()` moves to the next class, preventing head-of-line +/// blocking. +/// +/// The `defcls=1` parameter routes unclassified traffic to class 1:1 (no impairments). 
+#[derive(Debug, Clone)] +pub struct QdiscHtbRequest { + pub inner: QdiscRequestInner, +} + +impl QdiscHtbRequest { + /// Create a new HTB qdisc request for the given interface. + /// + /// The qdisc will be created at the root with handle 1:0. + pub fn new(inner: QdiscRequestInner) -> Self { + Self { inner } + } + + /// Build the netlink message to create this HTB qdisc. + /// + /// HTB requires a `TCA_HTB_INIT` attribute containing the `tc_htb_glob` structure + /// with version, rate2quantum, and default class configuration. + pub fn build(self) -> NetlinkMessage<RouteNetlinkMessage> { + let mut tc_message = TcMessage::with_index(self.inner.interface_index); + tc_message.header.parent = TcHandle::ROOT; + tc_message.header.handle = TcHandle::from(0x0001_0000); // 1:0 + + tc_message.attributes.push(TcAttribute::Kind("htb".to_string())); + + // HTB init options: tc_htb_glob wrapped in TCA_OPTIONS -> TCA_HTB_INIT + let glob = HtbGlob::default(); + let init_nla = build_nla(TCA_HTB_INIT, &glob.as_bytes()); + tc_message.attributes.push(TcAttribute::Other(build_nested_options(init_nla))); + + let mut nl_req = NetlinkMessage::from(RouteNetlinkMessage::NewQueueDiscipline(tc_message)); + // NLM_F_REPLACE allows updating an existing qdisc + nl_req.header.flags = NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST | NLM_F_ACK; + + nl_req + } +} + +/// Builder for creating an HTB class. +/// +/// Each destination peer gets its own HTB class with effectively unlimited +/// rate and ceil (~10 Gbit/s). The class serves as the attachment point for +/// TBF and netem qdiscs that implement actual impairments. +/// +/// With `prio=0` for all classes, HTB uses deficit-based round-robin among them. 
+/// +/// # Handle Scheme +/// +/// For a destination peer with ID `N`: +/// - Class handle: `1:(10 + N)` (e.g., peer 2 -> class 1:12) +/// +/// # Example +/// +/// ```no_run +/// use linkem::tc::handle::{ID_OFFSET, QdiscRequestInner}; +/// use linkem::tc::htb::HtbClassRequest; +/// use rtnetlink::packet_route::tc::TcHandle; +/// +/// let if_index = 1; // Network interface index +/// // Create class for traffic to peer 2 +/// let class_minor = ID_OFFSET + 2; // 12 +/// let request = HtbClassRequest::new( +/// QdiscRequestInner::new(if_index) +/// .with_parent(TcHandle::from(0x0001_0000)) // Parent is HTB root (1:0) +/// .with_handle(TcHandle::from((1 << 16) | class_minor)), // 1:12 +/// ).build(); +/// ``` +#[derive(Debug, Clone)] +pub struct HtbClassRequest { + pub inner: QdiscRequestInner, + /// If true, replace an existing class instead of failing if it exists. + pub replace: bool, +} + +impl HtbClassRequest { + /// Create a new HTB class request with unlimited rate/ceil. + pub fn new(inner: QdiscRequestInner) -> Self { + Self { inner, replace: false } + } + + /// Set whether to replace an existing class. + /// + /// When `true`, uses `NLM_F_REPLACE` to update an existing class. + /// When `false` (default), uses `NLM_F_EXCL` to fail if the class exists. + pub fn with_replace(mut self, replace: bool) -> Self { + self.replace = replace; + self + } + + /// Build the netlink message to create this HTB class. 
+ pub fn build(self) -> NetlinkMessage<RouteNetlinkMessage> { + let mut tc_message = TcMessage::with_index(self.inner.interface_index); + tc_message.header.parent = self.inner.parent; + tc_message.header.handle = self.inner.handle; + + tc_message.attributes.push(TcAttribute::Kind("htb".to_string())); + + let buffer_ticks = compute_buffer_ticks(); + + let rate_spec = TcRateSpec { + rate: HTB_UNLIMITED_RATE_BPS, + linklayer: 1, // TC_LINKLAYER_ETHERNET + cell_align: -1, + ..Default::default() + }; + + let opt = HtbOpt { + rate: rate_spec, + ceil: rate_spec, + buffer: buffer_ticks, + cbuffer: buffer_ticks, + quantum: 0, // Let kernel compute from rate + level: 0, // Leaf class + prio: 0, // All classes same priority + }; + + // HTB class options: TCA_HTB_PARMS + TCA_HTB_RTAB + TCA_HTB_CTAB in TCA_OPTIONS + let parms_nla = build_nla(TCA_HTB_PARMS, &opt.as_bytes()); + let rtab_nla = build_nla(TCA_HTB_RTAB, &DEFAULT_RATE_TABLE); + let ctab_nla = build_nla(TCA_HTB_CTAB, &DEFAULT_RATE_TABLE); + + let mut combined = parms_nla; + combined.extend(rtab_nla); + combined.extend(ctab_nla); + tc_message.attributes.push(TcAttribute::Other(build_nested_options(combined))); + + let mut nl_req = NetlinkMessage::from(RouteNetlinkMessage::NewTrafficClass(tc_message)); + nl_req.header.flags = if self.replace { + NLM_F_CREATE | NLM_F_REPLACE | NLM_F_REQUEST | NLM_F_ACK + } else { + NLM_F_CREATE | NLM_F_EXCL | NLM_F_REQUEST | NLM_F_ACK + }; + + nl_req + } +} diff --git a/linkem/src/tc/impairment.rs b/linkem/src/tc/impairment.rs index 30666a2..7560e6f 100644 --- a/linkem/src/tc/impairment.rs +++ b/linkem/src/tc/impairment.rs @@ -41,7 +41,7 @@ use super::core::MTU_ETHERNET; /// 4. When tokens are exhausted, packets queue (up to `tbf_limit_bytes`) /// 5. If the queue overflows, packets are dropped /// -/// The hierarchy becomes: `DRR class -> TBF -> netem` instead of `DRR class -> netem`. +/// The hierarchy becomes: `HTB class -> TBF -> netem` instead of `HTB class -> netem`. 
#[derive(Debug, Clone, Copy, PartialEq)] pub struct LinkImpairment { // --------------------------------------------------------------------------------- diff --git a/linkem/src/tc/mod.rs b/linkem/src/tc/mod.rs index c54a71b..9f0b584 100644 --- a/linkem/src/tc/mod.rs +++ b/linkem/src/tc/mod.rs @@ -11,14 +11,19 @@ //! //! ```text //! ┌─────────────────────────────────────────────────────────────────────────────┐ -//! │ DRR Root Qdisc (1:0) │ +//! │ HTB Root Qdisc (1:0) │ //! │ │ -//! │ Deficit Round Robin (DRR) serves as our root classifier. It allows us │ -//! │ to create an arbitrary number of classes, one per destination peer. │ -//! │ Each class can have its own chain of qdiscs for traffic shaping. │ +//! │ Hierarchical Token Bucket (HTB) serves as our root classifier. It │ +//! │ allows us to create an arbitrary number of classes, one per destination │ +//! │ peer. Each class can have its own chain of qdiscs for traffic shaping. │ //! │ │ -//! │ DRR is used purely for classification, not bandwidth shaping. With a │ -//! │ large quantum (4GiB), it acts as a simple packet router to child qdiscs. │ +//! │ HTB is used purely for classification, not bandwidth shaping. All │ +//! │ classes have effectively unlimited rate (~10 Gbit/s) and same priority, │ +//! │ so HTB fairly round-robins between child qdiscs. │ +//! │ │ +//! │ HTB was chosen over DRR because it correctly handles non-work-conserving │ +//! │ children (netem with delay). When netem returns NULL, HTB tries the next │ +//! │ class instead of blocking—preventing head-of-line blocking. │ //! └─────────────────────────────────────────────────────────────────────────────┘ //! │ //! ┌───────────────────────┼───────────────────────┐ @@ -27,7 +32,7 @@ //! ┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐ //! │ Class 1:1 │ │ Class 1:11 │ │ Class 1:12 │ //! │ (default) │ │ (dest=peer 1) │ │ (dest=peer 2) │ -//! │ quantum=4GiB │ │ quantum=4GiB │ │ quantum=4GiB │ +//! 
│ rate=unlimited │ │ rate=unlimited │ │ rate=unlimited │ //! │ │ │ │ │ │ //! │ Unimpaired │ │ Impaired path │ │ Impaired path │ //! │ traffic │ │ to peer 1 │ │ to peer 2 │ @@ -58,7 +63,7 @@ //! //! | Component | Handle | Example (peer_id=2) | //! |------------------|-----------------|---------------------| -//! | DRR root | `1:0` | `N/A` | +//! | HTB root | `1:0` | `N/A` | //! | Default class | `1:1` | `N/A` | //! | Per-dest class | `1:(10+id)` | `1:12` | //! | TBF qdisc | `(10+id):0` | `12:0` | @@ -66,31 +71,37 @@ //! //! ## Packet Flow //! -//! 1. Packet enters DRR root qdisc +//! 1. Packet enters HTB root qdisc //! 2. Flower filter examines destination IP -//! 3. If destination matches a configured peer → route to that peer's class -//! 4. Otherwise → route to default class (1:1, no impairment) +//! 3. If destination matches a configured peer -> route to that peer's class +//! 4. Otherwise -> route to default class (1:1, no impairment) //! 5. In the peer's class: TBF applies rate limiting (if configured) //! 6. Then netem applies delay, loss, jitter, etc. //! -//! ## Why DRR? +//! ## Why HTB? +//! +//! HTB (Hierarchical Token Bucket) was chosen over DRR (Deficit Round Robin) because +//! of a fundamental head-of-line blocking interaction between DRR and netem: +//! +//! DRR's `dequeue()` peeks at the first active class. When netem's peek returns NULL +//! (delay hasn't elapsed), DRR immediately returns without trying other classes. This +//! causes high-delay destinations to block low-delay destinations. //! -//! DRR (Deficit Round Robin) is the simplest classful qdisc that supports dynamic class -//! creation. Unlike HTB (Hierarchical Token Bucket), it doesn't impose bandwidth shaping -//! semantics at the classification layer. +//! HTB's `dequeue()` correctly handles this: when a child qdisc returns NULL, HTB +//! advances to the next class via `htb_next_rb_node()`. This prevents head-of-line +//! blocking entirely. //! -//! 
With a large quantum (4GiB), DRR acts as a pure packet router—when a class's turn comes, -//! it drains its queue entirely before moving on. Since TBF is the actual bottleneck, -//! DRR's round-robin scheduling rarely activates; packets flow directly to their -//! destination's TBF/netem chain. +//! With all classes at the same priority and effectively unlimited rate (~10 Gbit/s), +//! HTB acts as a fair classifier that correctly interleaves traffic across destinations +//! with different delays. //! //! This clean separation of concerns means: -//! - **DRR**: Classification only (route packets to the right child qdisc) +//! - **HTB**: Fair classification (route packets to the right child qdisc) //! - **TBF**: Rate limiting (enforce bandwidth caps) //! - **Netem**: Network emulation (add delay, loss, jitter) pub mod core; -pub mod drr; +pub mod htb; pub mod filter; pub mod handle; pub mod impairment; diff --git a/linkem/src/tc/netem.rs b/linkem/src/tc/netem.rs index a757c3e..babcc53 100644 --- a/linkem/src/tc/netem.rs +++ b/linkem/src/tc/netem.rs @@ -110,7 +110,7 @@ impl From for NetemQopt { /// /// For destination peer ID `N`: /// - Netem handle: `(20 + N):0` (e.g., peer 2 → handle 22:0) -/// - Parent: TBF `(10 + N):0` if bandwidth limited, else DRR class `1:(10 + N)` +/// - Parent: TBF `(10 + N):0` if bandwidth limited, else HTB class `1:(10 + N)` /// /// # Example /// diff --git a/linkem/src/tc/requests.rs b/linkem/src/tc/requests.rs index 3642ea8..3ef818b 100644 --- a/linkem/src/tc/requests.rs +++ b/linkem/src/tc/requests.rs @@ -10,39 +10,39 @@ use rtnetlink::packet_core::NetlinkPayload; use rtnetlink::packet_route::tc::TcHandle; use crate::network::PeerId; -use crate::tc::drr::{DrrClassRequest, QdiscDrrRequest}; use crate::tc::filter::{FlowerFilterRequest, U32CatchallFilterRequest}; -use crate::tc::handle::{QdiscRequestInner, drr_class_handle, netem_handle, tbf_handle}; +use crate::tc::handle::{QdiscRequestInner, htb_class_handle, netem_handle, tbf_handle}; 
+use crate::tc::htb::{HtbClassRequest, QdiscHtbRequest}; use crate::tc::impairment::LinkImpairment; use crate::tc::netem::QdiscNetemRequest; use crate::tc::tbf::QdiscTbfRequest; -/// Install DRR root qdisc with default class and catch-all filter. +/// Install HTB root qdisc with default class and catch-all filter. /// /// This sets up the root qdisc hierarchy on a peer's veth interface: -/// - DRR root qdisc (1:0) +/// - HTB root qdisc (1:0) with `defcls=1` /// - Default class (1:1) for unimpaired traffic /// - U32 catch-all filter to route unclassified packets to 1:1 -pub async fn install_drr_root( +pub async fn install_htb_root( handle: &mut rtnetlink::Handle, if_index: i32, ) -> std::result::Result<(), rtnetlink::Error> { - tracing::debug!("installing drr root qdisc"); + tracing::debug!("installing htb root qdisc"); - let drr_request = QdiscDrrRequest::new(QdiscRequestInner::new(if_index)).build(); + let htb_request = QdiscHtbRequest::new(QdiscRequestInner::new(if_index)).build(); - let mut res = handle.request(drr_request)?; + let mut res = handle.request(htb_request)?; while let Some(res) = res.next().await { if let NetlinkPayload::Error(e) = res.payload { - tracing::debug!(?e, "failed to create drr root qdisc"); + tracing::debug!(?e, "failed to create htb root qdisc"); return Err(rtnetlink::Error::NetlinkError(e)); } } // Create the default class (1:1) for unimpaired traffic. 
- let default_class_request = DrrClassRequest::new( + let default_class_request = HtbClassRequest::new( QdiscRequestInner::new(if_index) - .with_parent(TcHandle::from(0x0001_0000)) // Parent: drr root + .with_parent(TcHandle::from(0x0001_0000)) // Parent: HTB root .with_handle(TcHandle::from(0x0001_0001)), // Handle: 1:1 ) .build(); @@ -50,7 +50,7 @@ pub async fn install_drr_root( let mut res = handle.request(default_class_request)?; while let Some(res) = res.next().await { if let NetlinkPayload::Error(e) = res.payload { - tracing::debug!(?e, "failed to create default drr class"); + tracing::debug!(?e, "failed to create default htb class"); return Err(rtnetlink::Error::NetlinkError(e)); } } @@ -71,30 +71,30 @@ pub async fn install_drr_root( } } - tracing::debug!("drr root qdisc, default class, and catch-all filter installed"); + tracing::debug!("htb root qdisc, default class, and catch-all filter installed"); Ok(()) } -/// Create or replace a DRR class for a destination peer. +/// Create or replace an HTB class for a destination peer. /// /// Each destination peer gets its own class with handle 1:(10 + peer_id). 
-pub async fn configure_drr_class( +pub async fn configure_htb_class( handle: &mut rtnetlink::Handle, if_index: i32, dst_peer_id: PeerId, is_replacement: bool, ) -> std::result::Result<(), rtnetlink::Error> { - let class_handle = drr_class_handle(dst_peer_id); + let class_handle = htb_class_handle(dst_peer_id); tracing::debug!( dst_peer_id, class_handle = format!("{:x}", class_handle), is_replacement, - "creating drr class for destination" + "creating htb class for destination" ); - let class_request = DrrClassRequest::new( + let class_request = HtbClassRequest::new( QdiscRequestInner::new(if_index) - .with_parent(TcHandle::from(0x0001_0000)) // Parent: drr root (1:0) + .with_parent(TcHandle::from(0x0001_0000)) // Parent: HTB root (1:0) .with_handle(TcHandle::from(class_handle)), ) .with_replace(is_replacement) @@ -103,7 +103,7 @@ pub async fn configure_drr_class( let mut res = handle.request(class_request)?; while let Some(res) = res.next().await { if let NetlinkPayload::Error(e) = res.payload { - tracing::debug!(?e, "failed to create drr class"); + tracing::debug!(?e, "failed to create htb class"); return Err(rtnetlink::Error::NetlinkError(e)); } } @@ -115,7 +115,7 @@ /// /// Returns the parent handle that netem should attach to: /// - If TBF is created: returns the TBF handle -/// - If no bandwidth limit: returns the DRR class handle +/// - If no bandwidth limit: returns the HTB class handle pub async fn configure_tbf( handle: &mut rtnetlink::Handle, if_index: i32, @@ -123,7 +123,7 @@ impairment: &LinkImpairment, is_replacement: bool, ) -> std::result::Result<TcHandle, rtnetlink::Error> { - let class_handle = drr_class_handle(dst_peer_id); + let class_handle = htb_class_handle(dst_peer_id); let Some(mut tbf_request) = QdiscTbfRequest::try_new( QdiscRequestInner::new(if_index) @@ -131,7 +131,7 @@ .with_handle(TcHandle::from(tbf_handle(dst_peer_id))), impairment, ) else { - // No bandwidth limiting -
netem attaches directly to DRR class + // No bandwidth limiting - netem attaches directly to HTB class return Ok(TcHandle::from(class_handle)); }; @@ -207,7 +207,7 @@ pub async fn configure_flower_filter( dst_peer_id: PeerId, dst_ip: IpAddr, ) -> std::result::Result<(), rtnetlink::Error> { - let class_handle = drr_class_handle(dst_peer_id); + let class_handle = htb_class_handle(dst_peer_id); tracing::debug!( dst_ip = %dst_ip, class_id = format!("1:{}", class_handle & 0xFFFF), diff --git a/linkem/src/tc/tbf.rs b/linkem/src/tc/tbf.rs index 182b250..a51cc3f 100644 --- a/linkem/src/tc/tbf.rs +++ b/linkem/src/tc/tbf.rs @@ -1,7 +1,7 @@ //! Token Bucket Filter (TBF) qdisc support. //! //! TBF provides rate limiting by implementing a token bucket algorithm. -//! It's inserted between the DRR class and netem when bandwidth limiting is configured. +//! It's inserted between the HTB class and netem when bandwidth limiting is configured. use rtnetlink::packet_core::{ NLM_F_ACK, NLM_F_CREATE, NLM_F_EXCL, NLM_F_REPLACE, NLM_F_REQUEST, NetlinkMessage, @@ -244,7 +244,7 @@ impl TbfQopt { /// Builder for creating a TBF (Token Bucket Filter) qdisc. /// /// TBF provides rate limiting by implementing a token bucket algorithm. -/// It's inserted between the DRR class and netem when bandwidth limiting +/// It's inserted between the HTB class and netem when bandwidth limiting /// is configured. /// /// # How Token Bucket Works @@ -260,7 +260,7 @@ impl TbfQopt { /// /// For destination peer ID `N`: /// - TBF handle: `(10 + N):0` (e.g., peer 2 → handle 12:0) -/// - Parent: DRR class `1:(10 + N)` +/// - Parent: HTB class `1:(10 + N)` /// /// # Example ///