diff --git a/Cargo.lock b/Cargo.lock index f47ac348..eeab52a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1522,6 +1522,7 @@ dependencies = [ "tempfile", "textwrap", "uu_free", + "uu_hugetop", "uu_pgrep", "uu_pidof", "uu_pidwait", @@ -2344,6 +2345,15 @@ dependencies = [ "windows", ] +[[package]] +name = "uu_hugetop" +version = "0.0.1" +dependencies = [ + "clap", + "tempfile", + "uucore", +] + [[package]] name = "uu_pgrep" version = "0.0.1" diff --git a/Cargo.toml b/Cargo.toml index 4a02eea0..33948491 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ uudoc = [] feat_common_core = [ "free", + "hugetop", "pgrep", "pidof", "pidwait", @@ -94,6 +95,7 @@ uucore = { workspace = true } # free = { optional = true, version = "0.0.1", package = "uu_free", path = "src/uu/free" } +hugetop = { optional = true, version = "0.0.1", package = "uu_hugetop", path = "src/uu/hugetop" } pgrep = { optional = true, version = "0.0.1", package = "uu_pgrep", path = "src/uu/pgrep" } pidof = { optional = true, version = "0.0.1", package = "uu_pidof", path = "src/uu/pidof" } pidwait = { optional = true, version = "0.0.1", package = "uu_pidwait", path = "src/uu/pidwait" } diff --git a/README.md b/README.md index 2a1d0bf9..37580ee4 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ Provides command line and full screen utilities for browsing procfs, a "pseudo" Ongoing: * `free`: Shows the amount of free and used memory in the system. +* `hugetop`: Report hugepage usage of processes and the system as a whole. * `pgrep`: Searches for processes based on name and other attributes. * `pidof`: Find the process ID of a running program. * `pidwait`: Waits for a specific process to terminate. @@ -30,9 +31,6 @@ Ongoing: * `w`: Shows who is logged on and what they are doing.
* `watch`: Executes a program periodically, showing output fullscreen. -TODO: -* `hugetop`: Report hugepage usage of processes and the system as a whole. - Elsewhere: * `kill` is already implemented in https://github.com/uutils/coreutils diff --git a/src/uu/hugetop/Cargo.toml b/src/uu/hugetop/Cargo.toml new file mode 100644 index 00000000..aa0eea47 --- /dev/null +++ b/src/uu/hugetop/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "uu_hugetop" +description = "hugetop ~ (uutils) Report hugepage usage for the system and processes" +repository = "https://github.com/uutils/procps/tree/main/src/uu/hugetop" +authors.workspace = true +categories.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +clap = { workspace = true } +uucore = { workspace = true } + +[dev-dependencies] +tempfile = { workspace = true } + +[lib] +path = "src/hugetop.rs" + +[[bin]] +name = "hugetop" +path = "src/main.rs" diff --git a/src/uu/hugetop/src/hugetop.rs b/src/uu/hugetop/src/hugetop.rs new file mode 100644 index 00000000..0edde723 --- /dev/null +++ b/src/uu/hugetop/src/hugetop.rs @@ -0,0 +1,456 @@ +// This file is part of the uutils procps package. +// +// For the full copyright and license information, please view the LICENSE +// file that was distributed with this source code. 
+ +use clap::{value_parser, Arg, Command}; +use std::collections::BTreeMap; +use std::fs; +use std::path::Path; +use std::thread::sleep; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use uucore::error::UResult; + +const DEFAULT_HUGEPAGES_ROOT: &str = "/sys/kernel/mm/hugepages"; +const SYS_NODES_ROOT: &str = "/sys/devices/system/node"; +const DEFAULT_PROC_ROOT: &str = "/proc"; + +#[derive(Debug, Clone, PartialEq, Eq)] +struct HugePagePool { + size_kb: u64, + total_pages: u64, + free_pages: u64, + reserved_pages: u64, + surplus_pages: u64, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +struct ProcessHugeUsage { + pid: u32, + command: String, + anon_huge_kb: u64, + shared_hugetlb_kb: u64, + private_hugetlb_kb: u64, +} + +impl ProcessHugeUsage { + fn total_kb(&self) -> u64 { + self.anon_huge_kb + self.shared_hugetlb_kb + self.private_hugetlb_kb + } +} + +#[uucore::main] +pub fn uumain(args: impl uucore::Args) -> UResult<()> { + let matches = uu_app().try_get_matches_from(args)?; + + let limit = matches.get_one::("lines").copied(); + let numa = matches.get_flag("numa"); + let human = matches.get_flag("human"); + let once = matches.get_flag("once"); + let delay = *matches.get_one::("delay").unwrap_or(&0); + + if once || delay == 0 { + run(numa, human, limit) + } else { + loop { + // Clear the terminal to roughly approximate hugetop's screen refresh behavior. 
+ print!("\x1B[2J\x1B[H"); + run(numa, human, limit)?; + sleep(Duration::from_secs(delay)); + } + } +} + +fn run(numa: bool, human: bool, limit: Option) -> UResult<()> { + print_summary(numa, human); + print_headings(); + print_procs(human, limit); + Ok(()) +} + +pub fn uu_app() -> Command { + Command::new(uucore::util_name()) + .about("Report hugepage usage of processes and the system as a whole") + .arg( + Arg::new("delay") + .short('d') + .long("delay") + .value_name("SECONDS") + .help("Delay between updates (0 = run once)") + .value_parser(value_parser!(u64)), + ) + .arg( + Arg::new("numa") + .short('n') + .long("numa") + .help("Display per NUMA node huge page information") + .action(clap::ArgAction::SetTrue), + ) + .arg( + Arg::new("once") + .short('o') + .long("once") + .help("Only display once, then exit") + .action(clap::ArgAction::SetTrue), + ) + .arg( + Arg::new("human") + .short('H') + .long("human") + .help("Display human-readable output") + .action(clap::ArgAction::SetTrue), + ) + .arg( + Arg::new("lines") + .short('l') + .long("lines") + .value_name("N") + .help("Show the top N processes") + .value_parser(value_parser!(usize)), + ) +} + +fn print_summary(numa: bool, human: bool) { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or_default(); + + println!("hugetop - {}", now); + + let pools = match read_node_hugepage_pools() { + Ok(nodes) if numa => { + for (node, pools) in &nodes { + print_node(node, pools, human); + } + return; + } + Ok(nodes) => merge_node_pools(&nodes), + Err(_) => Vec::new(), + }; + + if pools.is_empty() { + let pools = read_hugepage_pools(Path::new(DEFAULT_HUGEPAGES_ROOT)).unwrap_or_default(); + if pools.is_empty() { + println!("(no hugepage pools found)"); + return; + } + print_node("node(s)", &pools, human); + } else { + print_node("node(s)", &pools, human); + } +} + +fn print_headings() { + println!("{:>8} {:>10} {:>10} COMMAND", "PID", "SHARED", "PRIVATE"); +} + +fn 
print_procs(human: bool, limit: Option) { + let mut processes = match read_process_hugepage_usage(Path::new(DEFAULT_PROC_ROOT)) { + Ok(p) => p, + Err(_) => Vec::new(), + }; + + processes.sort_by_key(|usage| std::cmp::Reverse(usage.total_kb())); + + let mut shown = 0usize; + let limit = limit.unwrap_or(processes.len()); + + for usage in processes { + if shown >= limit { + break; + } + + let shared = format_kb(usage.shared_hugetlb_kb, human); + let private = format_kb(usage.private_hugetlb_kb, human); + + println!( + "{:>8} {:>10} {:>10} {}", + usage.pid, shared, private, usage.command + ); + + shown += 1; + } +} + +fn print_node(node: &str, pools: &[HugePagePool], human: bool) { + let mut line = String::new(); + line.push_str(node); + line.push_str(":"); + + for (i, pool) in pools.iter().enumerate() { + if i > 0 { + line.push_str(","); + } + + let size = if human { + humanized(pool.size_kb, false) + } else { + format!("{}kB", pool.size_kb) + }; + + line.push_str(&format!(" {} - {}/{}", size, pool.free_pages, pool.total_pages)); + } + + println!("{}", line); +} + +fn format_kb(kb: u64, human: bool) -> String { + if human { + humanized(kb, false) + } else { + format!("{}", kb) + } +} + +fn humanized(kib: u64, si: bool) -> String { + let b = kib * 1024; + let units = ['B', 'K', 'M', 'G', 'T', 'P']; + let mut level = 0; + let mut divisor = 1u64; + + while level < units.len() - 1 && divisor * 100 <= b { + divisor *= if si { 1000 } else { 1024 }; + level += 1; + } + + if level == 0 { + return format!("{}{}", b, units[level]); + } + + let value = (b as f64) / (divisor as f64); + let formatted_value = if (value * 10.0).round() < 100.0 { + format!("{:.1}", (value * 10.0).round() / 10.0) + } else { + (value as u64).to_string() + }; + + format!( + "{}{}{}", + formatted_value, + units[level].to_owned(), + if si { "" } else { "i" } + ) +} + +fn read_node_hugepage_pools() -> UResult)>> { + let mut nodes = Vec::new(); + let Ok(entries) = fs::read_dir(SYS_NODES_ROOT) else { + 
return Ok(nodes); + }; + + for entry in entries.flatten() { + let file_name = entry.file_name(); + let name = match file_name.to_str() { + Some(n) if n.starts_with("node") => n.to_string(), + _ => continue, + }; + + let path = entry.path().join("hugepages"); + if !path.is_dir() { + continue; + } + + let pools = read_hugepage_pools(&path)?; + if pools.is_empty() { + continue; + } + + nodes.push((name, pools)); + } + + Ok(nodes) +} + +fn merge_node_pools(nodes: &[(String, Vec)]) -> Vec { + let mut map: BTreeMap = BTreeMap::new(); + + for (_, pools) in nodes { + for pool in pools { + let entry = map.entry(pool.size_kb).or_insert_with(|| HugePagePool { + size_kb: pool.size_kb, + total_pages: 0, + free_pages: 0, + reserved_pages: 0, + surplus_pages: 0, + }); + entry.total_pages += pool.total_pages; + entry.free_pages += pool.free_pages; + entry.reserved_pages += pool.reserved_pages; + entry.surplus_pages += pool.surplus_pages; + } + } + + map.into_values().collect() +} + +fn read_hugepage_pools(root: &Path) -> UResult> { + let mut pools = Vec::new(); + + let Ok(entries) = fs::read_dir(root) else { + return Ok(pools); + }; + + for entry in entries.flatten() { + let path = entry.path(); + let Some(name) = path.file_name().and_then(|s| s.to_str()) else { + continue; + }; + + let Some(size_kb) = parse_hugepage_dir_name(name) else { + continue; + }; + + let total_pages = read_u64(path.join("nr_hugepages")); + let free_pages = read_u64(path.join("free_hugepages")); + let reserved_pages = read_u64(path.join("resv_hugepages")); + let surplus_pages = read_u64(path.join("surplus_hugepages")); + + pools.push(HugePagePool { + size_kb, + total_pages, + free_pages, + reserved_pages, + surplus_pages, + }); + } + + pools.sort_by_key(|pool| pool.size_kb); + Ok(pools) +} + +fn read_process_hugepage_usage(root: &Path) -> UResult> { + let mut usages = Vec::new(); + + let Ok(entries) = fs::read_dir(root) else { + return Ok(usages); + }; + + for entry in entries.flatten() { + let path = 
entry.path(); + let Some(file_name) = path.file_name().and_then(|name| name.to_str()) else { + continue; + }; + + let Ok(pid) = file_name.parse::() else { + continue; + }; + + let Some((_, shared_hugetlb_kb, private_hugetlb_kb)) = + parse_smaps_rollup(&path.join("smaps_rollup")) + else { + continue; + }; + + let total_kb = shared_hugetlb_kb + private_hugetlb_kb; + if total_kb == 0 { + continue; + } + + let command = fs::read_to_string(path.join("comm")) + .unwrap_or_else(|_| String::from("?")) + .trim() + .to_string(); + + usages.push(ProcessHugeUsage { + pid, + command, + anon_huge_kb: 0, + shared_hugetlb_kb, + private_hugetlb_kb, + }); + } + + Ok(usages) +} + +fn parse_hugepage_dir_name(name: &str) -> Option { + let prefix = "hugepages-"; + let suffix = "kB"; + + if !name.starts_with(prefix) || !name.ends_with(suffix) { + return None; + } + + name[prefix.len()..name.len() - suffix.len()] + .parse::() + .ok() +} + +fn parse_smaps_rollup(path: &Path) -> Option<(u64, u64, u64)> { + let content = fs::read_to_string(path).ok()?; + + let mut anon_huge_kb = 0; + let mut shared_hugetlb_kb = 0; + let mut private_hugetlb_kb = 0; + + for line in content.lines() { + if let Some(value) = parse_kb_field(line, "AnonHugePages:") { + anon_huge_kb = value; + } else if let Some(value) = parse_kb_field(line, "Shared_Hugetlb:") { + shared_hugetlb_kb = value; + } else if let Some(value) = parse_kb_field(line, "Private_Hugetlb:") { + private_hugetlb_kb = value; + } + } + + Some((anon_huge_kb, shared_hugetlb_kb, private_hugetlb_kb)) +} + +fn parse_kb_field(line: &str, field: &str) -> Option { + let value = line.strip_prefix(field)?.trim(); + let number = value.split_whitespace().next()?; + number.parse::().ok() +} + +fn read_u64(path: impl AsRef) -> u64 { + fs::read_to_string(path) + .ok() + .and_then(|s| s.trim().parse::().ok()) + .unwrap_or(0) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write; + + #[test] + fn parse_hugepage_name_works() { + 
assert_eq!(parse_hugepage_dir_name("hugepages-2048kB"), Some(2048)); + assert_eq!( + parse_hugepage_dir_name("hugepages-1048576kB"), + Some(1_048_576) + ); + assert_eq!(parse_hugepage_dir_name("hugepages-foo"), None); + } + + #[test] + fn parse_smaps_rollup_works() { + let dir = tempfile::tempdir().unwrap(); + let file = dir.path().join("smaps_rollup"); + let mut f = fs::File::create(&file).unwrap(); + writeln!(f, "AnonHugePages: 512 kB").unwrap(); + writeln!(f, "Shared_Hugetlb: 64 kB").unwrap(); + writeln!(f, "Private_Hugetlb: 32 kB").unwrap(); + + assert_eq!(parse_smaps_rollup(&file), Some((512, 64, 32))); + } + + #[test] + fn reads_pools_from_tree() { + let dir = tempfile::tempdir().unwrap(); + let pool = dir.path().join("hugepages-2048kB"); + fs::create_dir(&pool).unwrap(); + fs::write(pool.join("nr_hugepages"), "10\n").unwrap(); + fs::write(pool.join("free_hugepages"), "3\n").unwrap(); + fs::write(pool.join("resv_hugepages"), "2\n").unwrap(); + fs::write(pool.join("surplus_hugepages"), "1\n").unwrap(); + + let pools = read_hugepage_pools(dir.path()).unwrap(); + assert_eq!(pools.len(), 1); + assert_eq!(pools[0].size_kb, 2048); + assert_eq!(pools[0].total_pages, 10); + assert_eq!(pools[0].free_pages, 3); + } +} diff --git a/src/uu/hugetop/src/main.rs b/src/uu/hugetop/src/main.rs new file mode 100644 index 00000000..1e6f1470 --- /dev/null +++ b/src/uu/hugetop/src/main.rs @@ -0,0 +1 @@ +uucore::bin!(uu_hugetop); diff --git a/tests/by-util/test_hugetop.rs b/tests/by-util/test_hugetop.rs new file mode 100644 index 00000000..e78957e1 --- /dev/null +++ b/tests/by-util/test_hugetop.rs @@ -0,0 +1,21 @@ +// This file is part of the uutils procps package. +// +// For the full copyright and license information, please view the LICENSE +// file that was distributed with this source code. 
+ +use uutests::new_ucmd; + +#[test] +fn runs_successfully() { + new_ucmd!().succeeds(); +} + +#[test] +fn supports_lines_option() { + new_ucmd!().arg("-l").arg("1").succeeds(); +} + +#[test] +fn supports_numa_option() { + new_ucmd!().arg("-n").succeeds(); +} diff --git a/tests/tests.rs b/tests/tests.rs index 8df70274..a27a4294 100644 --- a/tests/tests.rs +++ b/tests/tests.rs @@ -23,6 +23,10 @@ mod test_pwdx; #[path = "by-util/test_free.rs"] mod test_free; +#[cfg(feature = "hugetop")] +#[path = "by-util/test_hugetop.rs"] +mod test_hugetop; + #[cfg(feature = "w")] #[path = "by-util/test_w.rs"] mod test_w;